Commit 80d69009 authored by Dave Airlie

Merge tag 'drm-intel-next-2015-11-20-merged' of git://anongit.freedesktop.org/drm-intel into drm-next

drm-intel-next-2015-11-20-rebased:
Four weeks' worth because of my vacation, so a bit more than usual:
- final bits of the typesafe register mmio functions (Ville)
- power domain fix for hdmi detection (Imre)
- tons of fixes and improvements to the psr code (Rodrigo)
- refactoring of the dp detection code (Ander)
- complete rework of the dmc loader and dc5/dc6 handling (Imre, Patrik and
  others)
- dp compliance improvements from Shubhangi Shrivastava
- stop_machine hack from Chris to fix corruptions when updating GTT ptes on bsw
- lots of fifo underrun fixes from Ville
- big pile of fbc fixes and improvements from Paulo
- fix fbdev failure paths (Tvrtko and Lukas Wunner)
- dp link training refactoring (Ander)
- interruptible prepare_plane for atomic (Maarten)
- basic kabylake support (Deepak & Rodrigo)
- don't leak ringspace on resets (Chris)
drm-intel-next-2015-10-23:
- 2nd attempt at atomic watermarks from Matt, but just prep for now
- fixes all over
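
The "stop_machine hack" for Braswell mentioned above works by quiescing every other CPU while the GGTT PTEs are rewritten, so nothing can read through the GTT mid-update (the Kconfig hunk below adds the matching "select STOP_MACHINE"). A rough sketch of the pattern, with made-up names standing in for the driver's own helpers:

#include <linux/stop_machine.h>
#include <linux/types.h>

/* Illustrative argument bundle; in i915 this would carry the address
 * space, the sg_table of backing pages and the GTT start offset. */
struct pte_update {
	void *vm;
	void *pages;
	u64 start;
};

static int pte_update__cb(void *data)
{
	struct pte_update *upd = data;

	/* Runs on one CPU with all others held in stop_machine(), so no
	 * concurrent GTT access can observe half-written PTEs. */
	/* ... write the PTEs described by upd here ... */
	(void)upd;
	return 0;
}

static void pte_update_serialised(struct pte_update *upd)
{
	stop_machine(pte_update__cb, upd, NULL); /* NULL: let the core pick the CPU */
}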

* tag 'drm-intel-next-2015-11-20-merged' of git://anongit.freedesktop.org/drm-intel: (209 commits)
  drm/i915: Update DRIVER_DATE to 20151120
  drm/i915: take a power domain reference while checking the HDMI live status
  drm/i915: take a power domain ref only when needed during HDMI detect
  drm/i915: Tear down fbdev if initialization fails
  async: export current_is_async()
  Revert "drm/i915: Initialize HWS page address after GPU reset"
  drm/i915: Fix oops caused by fbdev initialization failure
  drm/i915: Fix i915_ggtt_view_equal to handle rotation correctly
  drm/i915: Stuff rotation params into view union
  drm/i915: Drop return value from intel_fill_fb_ggtt_view
  drm/i915 : Fix to remove unnecsessary checks in postclose function.
  drm/i915: add MISSING_CASE to a few port/aux power domain helpers
  drm/i915/ddi: fix intel_display_port_aux_power_domain() after HDMI detect
  drm/i915: Remove platform specific *_dp_detect() functions
  drm/i915: Don't do edp panel detection in g4x_dp_detect()
  drm/i915: Send TP1 TP2/3 even when panel claims no NO_TRAIN_ON_EXIT.
  drm/i915: PSR: Don't Skip aux handshake on DP_PSR_NO_TRAIN_ON_EXIT.
  drm/i915: Reduce PSR re-activation time for VLV/CHV.
  drm/i915: Delay first PSR activation.
  drm/i915: Type safe register read/write
  ...
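
Most of the mechanical churn in the diff below is the tail end of the "typesafe register mmio functions" item: raw u32 register offsets become an i915_reg_t handle, and code that needs the raw offset (the command parser, workaround lists, debugfs) goes through i915_mmio_reg_offset(). A minimal sketch of the type, close to but simplified from what the series puts in i915_reg.h:

#include <linux/types.h>

typedef struct {
	uint32_t reg;
} i915_reg_t;

/* Wrap a raw offset; using a struct makes it a compile-time error to pass
 * a bare integer where a register handle is expected (and vice versa). */
#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })

static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
{
	return reg.reg;
}

static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b)
{
	return i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b);
}

This is why, for example, find_reg() in the command-parser hunk now compares i915_mmio_reg_offset(table[i].addr) == addr rather than table[i].addr == addr.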
...@@ -4177,17 +4177,21 @@ int num_ioctls;</synopsis> ...@@ -4177,17 +4177,21 @@ int num_ioctls;</synopsis>
</sect2> </sect2>
</sect1> </sect1>
<sect1> <sect1>
<title>GuC-based Command Submission</title> <title>GuC</title>
<sect2> <sect2>
<title>GuC</title> <title>GuC-specific firmware loader</title>
!Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader !Pdrivers/gpu/drm/i915/intel_guc_loader.c GuC-specific firmware loader
!Idrivers/gpu/drm/i915/intel_guc_loader.c !Idrivers/gpu/drm/i915/intel_guc_loader.c
</sect2> </sect2>
<sect2> <sect2>
<title>GuC Client</title> <title>GuC-based command submission</title>
!Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submissison !Pdrivers/gpu/drm/i915/i915_guc_submission.c GuC-based command submission
!Idrivers/gpu/drm/i915/i915_guc_submission.c !Idrivers/gpu/drm/i915/i915_guc_submission.c
</sect2> </sect2>
<sect2>
<title>GuC Firmware Layout</title>
!Pdrivers/gpu/drm/i915/intel_guc_fwif.h GuC Firmware Layout
</sect2>
</sect1> </sect1>
<sect1> <sect1>
......
...@@ -547,6 +547,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = { ...@@ -547,6 +547,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = {
INTEL_CHV_IDS(&chv_stolen_funcs), INTEL_CHV_IDS(&chv_stolen_funcs),
INTEL_SKL_IDS(&gen9_stolen_funcs), INTEL_SKL_IDS(&gen9_stolen_funcs),
INTEL_BXT_IDS(&gen9_stolen_funcs), INTEL_BXT_IDS(&gen9_stolen_funcs),
INTEL_KBL_IDS(&gen9_stolen_funcs),
}; };
static void __init intel_graphics_stolen(int num, int slot, int func) static void __init intel_graphics_stolen(int num, int slot, int func)
......
...@@ -10,6 +10,7 @@ config DRM_I915 ...@@ -10,6 +10,7 @@ config DRM_I915
# the shmem_readpage() which depends upon tmpfs # the shmem_readpage() which depends upon tmpfs
select SHMEM select SHMEM
select TMPFS select TMPFS
select STOP_MACHINE
select DRM_KMS_HELPER select DRM_KMS_HELPER
select DRM_PANEL select DRM_PANEL
select DRM_MIPI_DSI select DRM_MIPI_DSI
......
...@@ -77,6 +77,7 @@ i915-y += dvo_ch7017.o \ ...@@ -77,6 +77,7 @@ i915-y += dvo_ch7017.o \
dvo_tfp410.o \ dvo_tfp410.o \
intel_crt.o \ intel_crt.o \
intel_ddi.o \ intel_ddi.o \
intel_dp_link_training.o \
intel_dp_mst.o \ intel_dp_mst.o \
intel_dp.o \ intel_dp.o \
intel_dsi.o \ intel_dsi.o \
......
...@@ -32,7 +32,8 @@ struct intel_dvo_device { ...@@ -32,7 +32,8 @@ struct intel_dvo_device {
const char *name; const char *name;
int type; int type;
/* DVOA/B/C output register */ /* DVOA/B/C output register */
u32 dvo_reg; i915_reg_t dvo_reg;
i915_reg_t dvo_srcdim_reg;
/* GPIO register used for i2c bus to control this device */ /* GPIO register used for i2c bus to control this device */
u32 gpio; u32 gpio;
int slave_addr; int slave_addr;
......
...@@ -407,14 +407,14 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = { ...@@ -407,14 +407,14 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
* LRI. * LRI.
*/ */
struct drm_i915_reg_descriptor { struct drm_i915_reg_descriptor {
u32 addr; i915_reg_t addr;
u32 mask; u32 mask;
u32 value; u32 value;
}; };
/* Convenience macro for adding 32-bit registers. */ /* Convenience macro for adding 32-bit registers. */
#define REG32(address, ...) \ #define REG32(_reg, ...) \
{ .addr = address, __VA_ARGS__ } { .addr = (_reg), __VA_ARGS__ }
/* /*
* Convenience macro for adding 64-bit registers. * Convenience macro for adding 64-bit registers.
...@@ -423,8 +423,13 @@ struct drm_i915_reg_descriptor { ...@@ -423,8 +423,13 @@ struct drm_i915_reg_descriptor {
* access commands only allow 32-bit accesses. Hence, we have to include * access commands only allow 32-bit accesses. Hence, we have to include
* entries for both halves of the 64-bit registers. * entries for both halves of the 64-bit registers.
*/ */
#define REG64(addr) \ #define REG64(_reg) \
REG32(addr), REG32(addr + sizeof(u32)) { .addr = _reg }, \
{ .addr = _reg ## _UDW }
#define REG64_IDX(_reg, idx) \
{ .addr = _reg(idx) }, \
{ .addr = _reg ## _UDW(idx) }
static const struct drm_i915_reg_descriptor gen7_render_regs[] = { static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG64(GPGPU_THREADS_DISPATCHED), REG64(GPGPU_THREADS_DISPATCHED),
...@@ -451,14 +456,14 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = { ...@@ -451,14 +456,14 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG32(GEN7_GPGPU_DISPATCHDIMX), REG32(GEN7_GPGPU_DISPATCHDIMX),
REG32(GEN7_GPGPU_DISPATCHDIMY), REG32(GEN7_GPGPU_DISPATCHDIMY),
REG32(GEN7_GPGPU_DISPATCHDIMZ), REG32(GEN7_GPGPU_DISPATCHDIMZ),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)), REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)), REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)), REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)), REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 3),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)), REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 0),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)), REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 1),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)), REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 2),
REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)), REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 3),
REG32(GEN7_SO_WRITE_OFFSET(0)), REG32(GEN7_SO_WRITE_OFFSET(0)),
REG32(GEN7_SO_WRITE_OFFSET(1)), REG32(GEN7_SO_WRITE_OFFSET(1)),
REG32(GEN7_SO_WRITE_OFFSET(2)), REG32(GEN7_SO_WRITE_OFFSET(2)),
...@@ -592,7 +597,7 @@ static bool check_sorted(int ring_id, ...@@ -592,7 +597,7 @@ static bool check_sorted(int ring_id,
bool ret = true; bool ret = true;
for (i = 0; i < reg_count; i++) { for (i = 0; i < reg_count; i++) {
u32 curr = reg_table[i].addr; u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
if (curr < previous) { if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n", DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
...@@ -847,7 +852,7 @@ find_reg(const struct drm_i915_reg_descriptor *table, ...@@ -847,7 +852,7 @@ find_reg(const struct drm_i915_reg_descriptor *table,
int i; int i;
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
if (table[i].addr == addr) if (i915_mmio_reg_offset(table[i].addr) == addr)
return &table[i]; return &table[i];
} }
} }
...@@ -1023,7 +1028,7 @@ static bool check_cmd(const struct intel_engine_cs *ring, ...@@ -1023,7 +1028,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* to the register. Hence, limit OACONTROL writes to * to the register. Hence, limit OACONTROL writes to
* only MI_LOAD_REGISTER_IMM commands. * only MI_LOAD_REGISTER_IMM commands.
*/ */
if (reg_addr == OACONTROL) { if (reg_addr == i915_mmio_reg_offset(OACONTROL)) {
if (desc->cmd.value == MI_LOAD_REGISTER_MEM) { if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n"); DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
return false; return false;
......
...@@ -1252,18 +1252,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused) ...@@ -1252,18 +1252,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 : max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff; rp_state_cap >> 16) & 0xff;
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq)); intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8; max_freq = (rp_state_cap & 0xff00) >> 8;
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq)); intel_gpu_freq(dev_priv, max_freq));
max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 : max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff; rp_state_cap >> 0) & 0xff;
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq)); intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n", seq_printf(m, "Max overclocked frequency: %dMHz\n",
...@@ -1523,7 +1526,7 @@ static int gen6_drpc_info(struct seq_file *m) ...@@ -1523,7 +1526,7 @@ static int gen6_drpc_info(struct seq_file *m)
seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
} }
gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
rpmodectl1 = I915_READ(GEN6_RP_CONTROL); rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
...@@ -1640,7 +1643,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) ...@@ -1640,7 +1643,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_puts(m, "FBC enabled\n"); seq_puts(m, "FBC enabled\n");
else else
seq_printf(m, "FBC disabled: %s\n", seq_printf(m, "FBC disabled: %s\n",
intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason)); dev_priv->fbc.no_fbc_reason);
if (INTEL_INFO(dev_priv)->gen >= 7) if (INTEL_INFO(dev_priv)->gen >= 7)
seq_printf(m, "Compressing: %s\n", seq_printf(m, "Compressing: %s\n",
...@@ -1801,7 +1804,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) ...@@ -1801,7 +1804,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret) if (ret)
goto out; goto out;
if (IS_SKYLAKE(dev)) { if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
/* Convert GT frequency to 50 HZ units */ /* Convert GT frequency to 50 HZ units */
min_gpu_freq = min_gpu_freq =
dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER; dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
...@@ -1821,7 +1824,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) ...@@ -1821,7 +1824,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
&ia_freq); &ia_freq);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(dev_priv, (gpu_freq * intel_gpu_freq(dev_priv, (gpu_freq *
(IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))), (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100, ((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100); ((ia_freq >> 8) & 0xff) * 100);
} }
...@@ -1873,17 +1877,19 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) ...@@ -1873,17 +1877,19 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
ifbdev = dev_priv->fbdev; ifbdev = dev_priv->fbdev;
fb = to_intel_framebuffer(ifbdev->helper.fb); if (ifbdev) {
fb = to_intel_framebuffer(ifbdev->helper.fb);
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fb->base.width, seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fb->base.height, fb->base.width,
fb->base.depth, fb->base.height,
fb->base.bits_per_pixel, fb->base.depth,
fb->base.modifier[0], fb->base.bits_per_pixel,
atomic_read(&fb->base.refcount.refcount)); fb->base.modifier[0],
describe_obj(m, fb->obj); atomic_read(&fb->base.refcount.refcount));
seq_putc(m, '\n'); describe_obj(m, fb->obj);
seq_putc(m, '\n');
}
#endif #endif
mutex_lock(&dev->mode_config.fb_lock); mutex_lock(&dev->mode_config.fb_lock);
...@@ -2402,6 +2408,12 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) ...@@ -2402,6 +2408,12 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted); guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
seq_printf(m, "\tversion found: %d.%d\n", seq_printf(m, "\tversion found: %d.%d\n",
guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found); guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
seq_printf(m, "\theader: offset is %d; size = %d\n",
guc_fw->header_offset, guc_fw->header_size);
seq_printf(m, "\tuCode: offset is %d; size = %d\n",
guc_fw->ucode_offset, guc_fw->ucode_size);
seq_printf(m, "\tRSA: offset is %d; size = %d\n",
guc_fw->rsa_offset, guc_fw->rsa_size);
tmp = I915_READ(GUC_STATUS); tmp = I915_READ(GUC_STATUS);
...@@ -2550,7 +2562,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) ...@@ -2550,7 +2562,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
yesno(work_busy(&dev_priv->psr.work.work))); yesno(work_busy(&dev_priv->psr.work.work)));
if (HAS_DDI(dev)) if (HAS_DDI(dev))
enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
else { else {
for_each_pipe(dev_priv, pipe) { for_each_pipe(dev_priv, pipe) {
stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
...@@ -2572,7 +2584,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) ...@@ -2572,7 +2584,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
/* CHV PSR has no kind of performance counter */ /* CHV PSR has no kind of performance counter */
if (HAS_DDI(dev)) { if (HAS_DDI(dev)) {
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & psrperf = I915_READ(EDP_PSR_PERF_CNT) &
EDP_PSR_PERF_CNT_MASK; EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance_Counter: %u\n", psrperf); seq_printf(m, "Performance_Counter: %u\n", psrperf);
...@@ -2696,24 +2708,16 @@ static const char *power_domain_str(enum intel_display_power_domain domain) ...@@ -2696,24 +2708,16 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "TRANSCODER_C"; return "TRANSCODER_C";
case POWER_DOMAIN_TRANSCODER_EDP: case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP"; return "TRANSCODER_EDP";
case POWER_DOMAIN_PORT_DDI_A_2_LANES: case POWER_DOMAIN_PORT_DDI_A_LANES:
return "PORT_DDI_A_2_LANES"; return "PORT_DDI_A_LANES";
case POWER_DOMAIN_PORT_DDI_A_4_LANES: case POWER_DOMAIN_PORT_DDI_B_LANES:
return "PORT_DDI_A_4_LANES"; return "PORT_DDI_B_LANES";
case POWER_DOMAIN_PORT_DDI_B_2_LANES: case POWER_DOMAIN_PORT_DDI_C_LANES:
return "PORT_DDI_B_2_LANES"; return "PORT_DDI_C_LANES";
case POWER_DOMAIN_PORT_DDI_B_4_LANES: case POWER_DOMAIN_PORT_DDI_D_LANES:
return "PORT_DDI_B_4_LANES"; return "PORT_DDI_D_LANES";
case POWER_DOMAIN_PORT_DDI_C_2_LANES: case POWER_DOMAIN_PORT_DDI_E_LANES:
return "PORT_DDI_C_2_LANES"; return "PORT_DDI_E_LANES";
case POWER_DOMAIN_PORT_DDI_C_4_LANES:
return "PORT_DDI_C_4_LANES";
case POWER_DOMAIN_PORT_DDI_D_2_LANES:
return "PORT_DDI_D_2_LANES";
case POWER_DOMAIN_PORT_DDI_D_4_LANES:
return "PORT_DDI_D_4_LANES";
case POWER_DOMAIN_PORT_DDI_E_2_LANES:
return "PORT_DDI_E_2_LANES";
case POWER_DOMAIN_PORT_DSI: case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI"; return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT: case POWER_DOMAIN_PORT_CRT:
...@@ -2734,6 +2738,10 @@ static const char *power_domain_str(enum intel_display_power_domain domain) ...@@ -2734,6 +2738,10 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "AUX_C"; return "AUX_C";
case POWER_DOMAIN_AUX_D: case POWER_DOMAIN_AUX_D:
return "AUX_D"; return "AUX_D";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_MODESET:
return "MODESET";
case POWER_DOMAIN_INIT: case POWER_DOMAIN_INIT:
return "INIT"; return "INIT";
default: default:
...@@ -2777,6 +2785,51 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) ...@@ -2777,6 +2785,51 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
return 0; return 0;
} }
static int i915_dmc_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_csr *csr;
if (!HAS_CSR(dev)) {
seq_puts(m, "not supported\n");
return 0;
}
csr = &dev_priv->csr;
intel_runtime_pm_get(dev_priv);
seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
seq_printf(m, "path: %s\n", csr->fw_path);
if (!csr->dmc_payload)
goto out;
seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
seq_printf(m, "DC3 -> DC5 count: %d\n",
I915_READ(SKL_CSR_DC3_DC5_COUNT));
seq_printf(m, "DC5 -> DC6 count: %d\n",
I915_READ(SKL_CSR_DC5_DC6_COUNT));
} else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
seq_printf(m, "DC3 -> DC5 count: %d\n",
I915_READ(BXT_CSR_DC3_DC5_COUNT));
}
out:
seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
intel_runtime_pm_put(dev_priv);
return 0;
}
static void intel_seq_print_mode(struct seq_file *m, int tabs, static void intel_seq_print_mode(struct seq_file *m, int tabs,
struct drm_display_mode *mode) struct drm_display_mode *mode)
{ {
...@@ -2944,6 +2997,107 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) ...@@ -2944,6 +2997,107 @@ static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
return cursor_active(dev, pipe); return cursor_active(dev, pipe);
} }
static const char *plane_type(enum drm_plane_type type)
{
switch (type) {
case DRM_PLANE_TYPE_OVERLAY:
return "OVL";
case DRM_PLANE_TYPE_PRIMARY:
return "PRI";
case DRM_PLANE_TYPE_CURSOR:
return "CUR";
/*
* Deliberately omitting default: to generate compiler warnings
* when a new drm_plane_type gets added.
*/
}
return "unknown";
}
static const char *plane_rotation(unsigned int rotation)
{
static char buf[48];
/*
* According to doc only one DRM_ROTATE_ is allowed but this
* will print them all to visualize if the values are misused
*/
snprintf(buf, sizeof(buf),
"%s%s%s%s%s%s(0x%08x)",
(rotation & BIT(DRM_ROTATE_0)) ? "0 " : "",
(rotation & BIT(DRM_ROTATE_90)) ? "90 " : "",
(rotation & BIT(DRM_ROTATE_180)) ? "180 " : "",
(rotation & BIT(DRM_ROTATE_270)) ? "270 " : "",
(rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "",
(rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "",
rotation);
return buf;
}
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct intel_plane *intel_plane;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
struct drm_plane_state *state;
struct drm_plane *plane = &intel_plane->base;
if (!plane->state) {
seq_puts(m, "plane->state is NULL!\n");
continue;
}
state = plane->state;
seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
plane->base.id,
plane_type(intel_plane->base.type),
state->crtc_x, state->crtc_y,
state->crtc_w, state->crtc_h,
(state->src_x >> 16),
((state->src_x & 0xffff) * 15625) >> 10,
(state->src_y >> 16),
((state->src_y & 0xffff) * 15625) >> 10,
(state->src_w >> 16),
((state->src_w & 0xffff) * 15625) >> 10,
(state->src_h >> 16),
((state->src_h & 0xffff) * 15625) >> 10,
state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
plane_rotation(state->rotation));
}
}
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
struct intel_crtc_state *pipe_config;
int num_scalers = intel_crtc->num_scalers;
int i;
pipe_config = to_intel_crtc_state(intel_crtc->base.state);
/* Not all platformas have a scaler */
if (num_scalers) {
seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
num_scalers,
pipe_config->scaler_state.scaler_users,
pipe_config->scaler_state.scaler_id);
for (i = 0; i < SKL_NUM_SCALERS; i++) {
struct intel_scaler *sc =
&pipe_config->scaler_state.scalers[i];
seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
i, yesno(sc->in_use), sc->mode);
}
seq_puts(m, "\n");
} else {
seq_puts(m, "\tNo scalers available on this platform\n");
}
}
static int i915_display_info(struct seq_file *m, void *unused) static int i915_display_info(struct seq_file *m, void *unused)
{ {
struct drm_info_node *node = m->private; struct drm_info_node *node = m->private;
...@@ -2963,10 +3117,12 @@ static int i915_display_info(struct seq_file *m, void *unused) ...@@ -2963,10 +3117,12 @@ static int i915_display_info(struct seq_file *m, void *unused)
pipe_config = to_intel_crtc_state(crtc->base.state); pipe_config = to_intel_crtc_state(crtc->base.state);
seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n", seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
crtc->base.base.id, pipe_name(crtc->pipe), crtc->base.base.id, pipe_name(crtc->pipe),
yesno(pipe_config->base.active), yesno(pipe_config->base.active),
pipe_config->pipe_src_w, pipe_config->pipe_src_h); pipe_config->pipe_src_w, pipe_config->pipe_src_h,
yesno(pipe_config->dither), pipe_config->pipe_bpp);
if (pipe_config->base.active) { if (pipe_config->base.active) {
intel_crtc_info(m, crtc); intel_crtc_info(m, crtc);
...@@ -2976,6 +3132,8 @@ static int i915_display_info(struct seq_file *m, void *unused) ...@@ -2976,6 +3132,8 @@ static int i915_display_info(struct seq_file *m, void *unused)
x, y, crtc->base.cursor->state->crtc_w, x, y, crtc->base.cursor->state->crtc_w,
crtc->base.cursor->state->crtc_h, crtc->base.cursor->state->crtc_h,
crtc->cursor_addr, yesno(active)); crtc->cursor_addr, yesno(active));
intel_scaler_info(m, crtc);
intel_plane_info(m, crtc);
} }
seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
...@@ -3110,7 +3268,8 @@ static int i915_wa_registers(struct seq_file *m, void *unused) ...@@ -3110,7 +3268,8 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count); seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
for (i = 0; i < dev_priv->workarounds.count; ++i) { for (i = 0; i < dev_priv->workarounds.count; ++i) {
u32 addr, mask, value, read; i915_reg_t addr;
u32 mask, value, read;
bool ok; bool ok;
addr = dev_priv->workarounds.reg[i].addr; addr = dev_priv->workarounds.reg[i].addr;
...@@ -3119,7 +3278,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused) ...@@ -3119,7 +3278,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
read = I915_READ(addr); read = I915_READ(addr);
ok = (value & mask) == (read & mask); ok = (value & mask) == (read & mask);
seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
addr, value, mask, read, ok ? "OK" : "FAIL"); i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
} }
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
...@@ -5023,7 +5182,7 @@ static void gen9_sseu_device_status(struct drm_device *dev, ...@@ -5023,7 +5182,7 @@ static void gen9_sseu_device_status(struct drm_device *dev,
stat->slice_total++; stat->slice_total++;
if (IS_SKYLAKE(dev)) if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
ss_cnt = INTEL_INFO(dev)->subslice_per_slice; ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
for (ss = 0; ss < ss_max; ss++) { for (ss = 0; ss < ss_max; ss++) {
...@@ -5236,6 +5395,7 @@ static const struct drm_info_list i915_debugfs_list[] = { ...@@ -5236,6 +5395,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_energy_uJ", i915_energy_uJ, 0}, {"i915_energy_uJ", i915_energy_uJ, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0}, {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0}, {"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_dmc_info", i915_dmc_info, 0},
{"i915_display_info", i915_display_info, 0}, {"i915_display_info", i915_display_info, 0},
{"i915_semaphore_status", i915_semaphore_status, 0}, {"i915_semaphore_status", i915_semaphore_status, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0}, {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
......
...@@ -28,7 +28,6 @@ ...@@ -28,7 +28,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/async.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/drm_crtc_helper.h> #include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h> #include <drm/drm_fb_helper.h>
...@@ -338,7 +337,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ ...@@ -338,7 +337,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
i915_resume_switcheroo(dev); i915_resume_switcheroo(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON; dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else { } else {
pr_err("switched off\n"); pr_info("switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
i915_suspend_switcheroo(dev, pmm); i915_suspend_switcheroo(dev, pmm);
dev->switch_power_state = DRM_SWITCH_POWER_OFF; dev->switch_power_state = DRM_SWITCH_POWER_OFF;
...@@ -396,7 +395,9 @@ static int i915_load_modeset_init(struct drm_device *dev) ...@@ -396,7 +395,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret) if (ret)
goto cleanup_vga_switcheroo; goto cleanup_vga_switcheroo;
intel_power_domains_init_hw(dev_priv); intel_power_domains_init_hw(dev_priv, false);
intel_csr_ucode_init(dev_priv);
ret = intel_irq_install(dev_priv); ret = intel_irq_install(dev_priv);
if (ret) if (ret)
...@@ -437,7 +438,7 @@ static int i915_load_modeset_init(struct drm_device *dev) ...@@ -437,7 +438,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
* scanning against hotplug events. Hence do this first and ignore the * scanning against hotplug events. Hence do this first and ignore the
* tiny window where we will loose hotplug notifactions. * tiny window where we will loose hotplug notifactions.
*/ */
async_schedule(intel_fbdev_initial_config, dev_priv); intel_fbdev_initial_config_async(dev);
drm_kms_helper_poll_init(dev); drm_kms_helper_poll_init(dev);
...@@ -663,7 +664,8 @@ static void gen9_sseu_info_init(struct drm_device *dev) ...@@ -663,7 +664,8 @@ static void gen9_sseu_info_init(struct drm_device *dev)
* supports EU power gating on devices with more than one EU * supports EU power gating on devices with more than one EU
* pair per subslice. * pair per subslice.
*/ */
info->has_slice_pg = (IS_SKYLAKE(dev) && (info->slice_total > 1)); info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
(info->slice_total > 1));
info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1)); info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
info->has_eu_pg = (info->eu_per_subslice > 2); info->has_eu_pg = (info->eu_per_subslice > 2);
} }
...@@ -890,7 +892,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -890,7 +892,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->mmio_flip_lock); spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->sb_lock); mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock); mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->csr_lock);
mutex_init(&dev_priv->av_mutex); mutex_init(&dev_priv->av_mutex);
intel_pm_setup(dev); intel_pm_setup(dev);
...@@ -937,9 +938,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -937,9 +938,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_uncore_init(dev); intel_uncore_init(dev);
/* Load CSR Firmware for SKL */
intel_csr_ucode_init(dev);
ret = i915_gem_gtt_init(dev); ret = i915_gem_gtt_init(dev);
if (ret) if (ret)
goto out_freecsr; goto out_freecsr;
...@@ -1113,7 +1111,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ...@@ -1113,7 +1111,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
out_gtt: out_gtt:
i915_global_gtt_cleanup(dev); i915_global_gtt_cleanup(dev);
out_freecsr: out_freecsr:
intel_csr_ucode_fini(dev); intel_csr_ucode_fini(dev_priv);
intel_uncore_fini(dev); intel_uncore_fini(dev);
pci_iounmap(dev->pdev, dev_priv->regs); pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge: put_bridge:
...@@ -1131,6 +1129,8 @@ int i915_driver_unload(struct drm_device *dev) ...@@ -1131,6 +1129,8 @@ int i915_driver_unload(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int ret; int ret;
intel_fbdev_fini(dev);
i915_audio_component_cleanup(dev_priv); i915_audio_component_cleanup(dev_priv);
ret = i915_gem_suspend(dev); ret = i915_gem_suspend(dev);
...@@ -1153,8 +1153,6 @@ int i915_driver_unload(struct drm_device *dev) ...@@ -1153,8 +1153,6 @@ int i915_driver_unload(struct drm_device *dev)
acpi_video_unregister(); acpi_video_unregister();
intel_fbdev_fini(dev);
drm_vblank_cleanup(dev); drm_vblank_cleanup(dev);
intel_modeset_cleanup(dev); intel_modeset_cleanup(dev);
...@@ -1196,7 +1194,7 @@ int i915_driver_unload(struct drm_device *dev) ...@@ -1196,7 +1194,7 @@ int i915_driver_unload(struct drm_device *dev)
intel_fbc_cleanup_cfb(dev_priv); intel_fbc_cleanup_cfb(dev_priv);
i915_gem_cleanup_stolen(dev); i915_gem_cleanup_stolen(dev);
intel_csr_ucode_fini(dev); intel_csr_ucode_fini(dev_priv);
intel_teardown_gmbus(dev); intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev); intel_teardown_mchbar(dev);
...@@ -1264,8 +1262,6 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) ...@@ -1264,8 +1262,6 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{ {
struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_file_private *file_priv = file->driver_priv;
if (file_priv && file_priv->bsd_ring)
file_priv->bsd_ring = NULL;
kfree(file_priv); kfree(file_priv);
} }
......
...@@ -383,6 +383,7 @@ static const struct intel_device_info intel_skylake_gt3_info = { ...@@ -383,6 +383,7 @@ static const struct intel_device_info intel_skylake_gt3_info = {
static const struct intel_device_info intel_broxton_info = { static const struct intel_device_info intel_broxton_info = {
.is_preliminary = 1, .is_preliminary = 1,
.is_broxton = 1,
.gen = 9, .gen = 9,
.need_gfx_hws = 1, .has_hotplug = 1, .need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
...@@ -394,50 +395,81 @@ static const struct intel_device_info intel_broxton_info = { ...@@ -394,50 +395,81 @@ static const struct intel_device_info intel_broxton_info = {
IVB_CURSOR_OFFSETS, IVB_CURSOR_OFFSETS,
}; };
static const struct intel_device_info intel_kabylake_info = {
.is_preliminary = 1,
.is_kabylake = 1,
.gen = 9,
.num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_kabylake_gt3_info = {
.is_preliminary = 1,
.is_kabylake = 1,
.gen = 9,
.num_pipes = 3,
.need_gfx_hws = 1, .has_hotplug = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
.has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
};
/* /*
* Make sure any device matches here are from most specific to most * Make sure any device matches here are from most specific to most
* general. For example, since the Quanta match is based on the subsystem * general. For example, since the Quanta match is based on the subsystem
* and subvendor IDs, we need it to come before the more general IVB * and subvendor IDs, we need it to come before the more general IVB
* PCI ID matches, otherwise we'll use the wrong info struct above. * PCI ID matches, otherwise we'll use the wrong info struct above.
*/ */
#define INTEL_PCI_IDS \ static const struct pci_device_id pciidlist[] = {
INTEL_I830_IDS(&intel_i830_info), \ INTEL_I830_IDS(&intel_i830_info),
INTEL_I845G_IDS(&intel_845g_info), \ INTEL_I845G_IDS(&intel_845g_info),
INTEL_I85X_IDS(&intel_i85x_info), \ INTEL_I85X_IDS(&intel_i85x_info),
INTEL_I865G_IDS(&intel_i865g_info), \ INTEL_I865G_IDS(&intel_i865g_info),
INTEL_I915G_IDS(&intel_i915g_info), \ INTEL_I915G_IDS(&intel_i915g_info),
INTEL_I915GM_IDS(&intel_i915gm_info), \ INTEL_I915GM_IDS(&intel_i915gm_info),
INTEL_I945G_IDS(&intel_i945g_info), \ INTEL_I945G_IDS(&intel_i945g_info),
INTEL_I945GM_IDS(&intel_i945gm_info), \ INTEL_I945GM_IDS(&intel_i945gm_info),
INTEL_I965G_IDS(&intel_i965g_info), \ INTEL_I965G_IDS(&intel_i965g_info),
INTEL_G33_IDS(&intel_g33_info), \ INTEL_G33_IDS(&intel_g33_info),
INTEL_I965GM_IDS(&intel_i965gm_info), \ INTEL_I965GM_IDS(&intel_i965gm_info),
INTEL_GM45_IDS(&intel_gm45_info), \ INTEL_GM45_IDS(&intel_gm45_info),
INTEL_G45_IDS(&intel_g45_info), \ INTEL_G45_IDS(&intel_g45_info),
INTEL_PINEVIEW_IDS(&intel_pineview_info), \ INTEL_PINEVIEW_IDS(&intel_pineview_info),
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \ INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \ INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \ INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \ INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \ INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \ INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \ INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
INTEL_HSW_D_IDS(&intel_haswell_d_info), \ INTEL_HSW_D_IDS(&intel_haswell_d_info),
INTEL_HSW_M_IDS(&intel_haswell_m_info), \ INTEL_HSW_M_IDS(&intel_haswell_m_info),
INTEL_VLV_M_IDS(&intel_valleyview_m_info), \ INTEL_VLV_M_IDS(&intel_valleyview_m_info),
INTEL_VLV_D_IDS(&intel_valleyview_d_info), \ INTEL_VLV_D_IDS(&intel_valleyview_d_info),
INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \ INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \ INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \ INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \ INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
INTEL_CHV_IDS(&intel_cherryview_info), \ INTEL_CHV_IDS(&intel_cherryview_info),
INTEL_SKL_GT1_IDS(&intel_skylake_info), \ INTEL_SKL_GT1_IDS(&intel_skylake_info),
INTEL_SKL_GT2_IDS(&intel_skylake_info), \ INTEL_SKL_GT2_IDS(&intel_skylake_info),
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), \ INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
INTEL_BXT_IDS(&intel_broxton_info) INTEL_BXT_IDS(&intel_broxton_info),
INTEL_KBL_GT1_IDS(&intel_kabylake_info),
static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_KBL_GT2_IDS(&intel_kabylake_info),
INTEL_PCI_IDS, INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
{0, 0, 0} {0, 0, 0}
}; };
...@@ -463,7 +495,7 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev) ...@@ -463,7 +495,7 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
ret = PCH_LPT; ret = PCH_LPT;
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
} else if (IS_SKYLAKE(dev)) { } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
ret = PCH_SPT; ret = PCH_SPT;
DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
} }
...@@ -526,11 +558,13 @@ void intel_detect_pch(struct drm_device *dev) ...@@ -526,11 +558,13 @@ void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT; dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
WARN_ON(!IS_SKYLAKE(dev)); WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT; dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev)); WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) { } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
dev_priv->pch_type = intel_virt_detect_pch(dev); dev_priv->pch_type = intel_virt_detect_pch(dev);
} else } else
...@@ -570,26 +604,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev) ...@@ -570,26 +604,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
return true; return true;
} }
void i915_firmware_load_error_print(const char *fw_path, int err)
{
DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
/*
* If the reason is not known assume -ENOENT since that's the most
* usual failure mode.
*/
if (!err)
err = -ENOENT;
if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
return;
DRM_ERROR(
"The driver is built-in, so to load the firmware you need to\n"
"include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
"in your initrd/initramfs image.\n");
}
static void intel_suspend_encoders(struct drm_i915_private *dev_priv) static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{ {
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
...@@ -608,7 +622,6 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv) ...@@ -608,7 +622,6 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
static int intel_suspend_complete(struct drm_i915_private *dev_priv); static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv, static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume); bool rpm_resume);
static int skl_resume_prepare(struct drm_i915_private *dev_priv);
static int bxt_resume_prepare(struct drm_i915_private *dev_priv); static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
...@@ -679,6 +692,9 @@ static int i915_drm_suspend(struct drm_device *dev) ...@@ -679,6 +692,9 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_display_set_init_power(dev_priv, false); intel_display_set_init_power(dev_priv, false);
if (HAS_CSR(dev_priv))
flush_work(&dev_priv->csr.work);
return 0; return 0;
} }
...@@ -687,10 +703,13 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) ...@@ -687,10 +703,13 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
struct drm_i915_private *dev_priv = drm_dev->dev_private; struct drm_i915_private *dev_priv = drm_dev->dev_private;
int ret; int ret;
intel_power_domains_suspend(dev_priv);
ret = intel_suspend_complete(dev_priv); ret = intel_suspend_complete(dev_priv);
if (ret) { if (ret) {
DRM_ERROR("Suspend complete failed: %d\n", ret); DRM_ERROR("Suspend complete failed: %d\n", ret);
intel_power_domains_init_hw(dev_priv, true);
return ret; return ret;
} }
...@@ -838,13 +857,11 @@ static int i915_drm_resume_early(struct drm_device *dev) ...@@ -838,13 +857,11 @@ static int i915_drm_resume_early(struct drm_device *dev)
if (IS_BROXTON(dev)) if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv); ret = bxt_resume_prepare(dev_priv);
else if (IS_SKYLAKE(dev_priv))
ret = skl_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv); hsw_disable_pc8(dev_priv);
intel_uncore_sanitize(dev); intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv); intel_power_domains_init_hw(dev_priv, true);
return ret; return ret;
} }
...@@ -1051,15 +1068,6 @@ static int i915_pm_resume(struct device *dev) ...@@ -1051,15 +1068,6 @@ static int i915_pm_resume(struct device *dev)
return i915_drm_resume(drm_dev); return i915_drm_resume(drm_dev);
} }
static int skl_suspend_complete(struct drm_i915_private *dev_priv)
{
/* Enabling DC6 is not a hard requirement to enter runtime D3 */
skl_uninit_cdclk(dev_priv);
return 0;
}
static int hsw_suspend_complete(struct drm_i915_private *dev_priv) static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{ {
hsw_enable_pc8(dev_priv); hsw_enable_pc8(dev_priv);
...@@ -1099,16 +1107,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv) ...@@ -1099,16 +1107,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
return 0; return 0;
} }
static int skl_resume_prepare(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
skl_init_cdclk(dev_priv);
intel_csr_load_program(dev);
return 0;
}
/* /*
* Save all Gunit registers that may be lost after a D3 and a subsequent * Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is * S0i[R123] transition. The list of registers needing a save/restore is
...@@ -1572,8 +1570,6 @@ static int intel_runtime_resume(struct device *device) ...@@ -1572,8 +1570,6 @@ static int intel_runtime_resume(struct device *device)
if (IS_BROXTON(dev)) if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv); ret = bxt_resume_prepare(dev_priv);
else if (IS_SKYLAKE(dev))
ret = skl_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv); hsw_disable_pc8(dev_priv);
else if (IS_VALLEYVIEW(dev_priv)) else if (IS_VALLEYVIEW(dev_priv))
...@@ -1616,8 +1612,6 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv) ...@@ -1616,8 +1612,6 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
if (IS_BROXTON(dev_priv)) if (IS_BROXTON(dev_priv))
ret = bxt_suspend_complete(dev_priv); ret = bxt_suspend_complete(dev_priv);
else if (IS_SKYLAKE(dev_priv))
ret = skl_suspend_complete(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = hsw_suspend_complete(dev_priv); ret = hsw_suspend_complete(dev_priv);
else if (IS_VALLEYVIEW(dev_priv)) else if (IS_VALLEYVIEW(dev_priv))
......
...@@ -57,7 +57,7 @@ ...@@ -57,7 +57,7 @@
#define DRIVER_NAME "i915" #define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics" #define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20151010" #define DRIVER_DATE "20151120"
#undef WARN_ON #undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */ /* Many gcc seem to no see through this and fall over :( */
...@@ -180,15 +180,11 @@ enum intel_display_power_domain { ...@@ -180,15 +180,11 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C, POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP, POWER_DOMAIN_TRANSCODER_EDP,
POWER_DOMAIN_PORT_DDI_A_2_LANES, POWER_DOMAIN_PORT_DDI_A_LANES,
POWER_DOMAIN_PORT_DDI_A_4_LANES, POWER_DOMAIN_PORT_DDI_B_LANES,
POWER_DOMAIN_PORT_DDI_B_2_LANES, POWER_DOMAIN_PORT_DDI_C_LANES,
POWER_DOMAIN_PORT_DDI_B_4_LANES, POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_C_2_LANES, POWER_DOMAIN_PORT_DDI_E_LANES,
POWER_DOMAIN_PORT_DDI_C_4_LANES,
POWER_DOMAIN_PORT_DDI_D_2_LANES,
POWER_DOMAIN_PORT_DDI_D_4_LANES,
POWER_DOMAIN_PORT_DDI_E_2_LANES,
POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT, POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER, POWER_DOMAIN_PORT_OTHER,
...@@ -199,6 +195,8 @@ enum intel_display_power_domain { ...@@ -199,6 +195,8 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C, POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D, POWER_DOMAIN_AUX_D,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_INIT, POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM, POWER_DOMAIN_NUM,
...@@ -630,11 +628,9 @@ struct drm_i915_display_funcs { ...@@ -630,11 +628,9 @@ struct drm_i915_display_funcs {
int target, int refclk, int target, int refclk,
struct dpll *match_clock, struct dpll *match_clock,
struct dpll *best_clock); struct dpll *best_clock);
int (*compute_pipe_wm)(struct intel_crtc *crtc,
struct drm_atomic_state *state);
void (*update_wm)(struct drm_crtc *crtc); void (*update_wm)(struct drm_crtc *crtc);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width, uint32_t sprite_height,
int pixel_size, bool enable, bool scaled);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state); int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state); void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active, /* Returns the active state of the crtc, and if the crtc is active,
...@@ -692,18 +688,18 @@ struct intel_uncore_funcs { ...@@ -692,18 +688,18 @@ struct intel_uncore_funcs {
void (*force_wake_put)(struct drm_i915_private *dev_priv, void (*force_wake_put)(struct drm_i915_private *dev_priv,
enum forcewake_domains domains); enum forcewake_domains domains);
uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace); uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace); uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace); uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace); uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset, void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
uint8_t val, bool trace); uint8_t val, bool trace);
void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset, void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
uint16_t val, bool trace); uint16_t val, bool trace);
void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset, void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
uint32_t val, bool trace); uint32_t val, bool trace);
void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset, void (*mmio_writeq)(struct drm_i915_private *dev_priv, i915_reg_t r,
uint64_t val, bool trace); uint64_t val, bool trace);
}; };
...@@ -720,11 +716,11 @@ struct intel_uncore { ...@@ -720,11 +716,11 @@ struct intel_uncore {
enum forcewake_domain_id id; enum forcewake_domain_id id;
unsigned wake_count; unsigned wake_count;
struct timer_list timer; struct timer_list timer;
u32 reg_set; i915_reg_t reg_set;
u32 val_set; u32 val_set;
u32 val_clear; u32 val_clear;
u32 reg_ack; i915_reg_t reg_ack;
u32 reg_post; i915_reg_t reg_post;
u32 val_reset; u32 val_reset;
} fw_domain[FW_DOMAIN_ID_COUNT]; } fw_domain[FW_DOMAIN_ID_COUNT];
}; };
...@@ -739,20 +735,19 @@ struct intel_uncore { ...@@ -739,20 +735,19 @@ struct intel_uncore {
#define for_each_fw_domain(domain__, dev_priv__, i__) \ #define for_each_fw_domain(domain__, dev_priv__, i__) \
for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__) for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
enum csr_state { #define CSR_VERSION(major, minor) ((major) << 16 | (minor))
FW_UNINITIALIZED = 0, #define CSR_VERSION_MAJOR(version) ((version) >> 16)
FW_LOADED, #define CSR_VERSION_MINOR(version) ((version) & 0xffff)
FW_FAILED
};
struct intel_csr { struct intel_csr {
struct work_struct work;
const char *fw_path; const char *fw_path;
uint32_t *dmc_payload; uint32_t *dmc_payload;
uint32_t dmc_fw_size; uint32_t dmc_fw_size;
uint32_t version;
uint32_t mmio_count; uint32_t mmio_count;
uint32_t mmioaddr[8]; i915_reg_t mmioaddr[8];
uint32_t mmiodata[8]; uint32_t mmiodata[8];
enum csr_state state;
}; };
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \ #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
...@@ -770,6 +765,8 @@ struct intel_csr { ...@@ -770,6 +765,8 @@ struct intel_csr {
func(is_valleyview) sep \ func(is_valleyview) sep \
func(is_haswell) sep \ func(is_haswell) sep \
func(is_skylake) sep \ func(is_skylake) sep \
func(is_broxton) sep \
func(is_kabylake) sep \
func(is_preliminary) sep \ func(is_preliminary) sep \
func(has_fbc) sep \ func(has_fbc) sep \
func(has_pipe_cxsr) sep \ func(has_pipe_cxsr) sep \
...@@ -928,24 +925,7 @@ struct i915_fbc { ...@@ -928,24 +925,7 @@ struct i915_fbc {
struct drm_framebuffer *fb; struct drm_framebuffer *fb;
} *fbc_work; } *fbc_work;
enum no_fbc_reason { const char *no_fbc_reason;
FBC_OK, /* FBC is enabled */
FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
FBC_NO_OUTPUT, /* no outputs enabled to compress */
FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
FBC_MODE_TOO_LARGE, /* mode too large for compression */
FBC_BAD_PLANE, /* fbc not supported on plane */
FBC_NOT_TILED, /* buffer not tiled */
FBC_MULTIPLE_PIPES, /* more than one pipe active */
FBC_MODULE_PARAM,
FBC_CHIP_DEFAULT, /* disabled by default on this chip */
FBC_ROTATION, /* rotation is not supported */
FBC_IN_DBG_MASTER, /* kernel debugger is active */
FBC_BAD_STRIDE, /* stride is not supported */
FBC_PIXEL_RATE, /* pixel rate is too big */
FBC_PIXEL_FORMAT /* pixel format is invalid */
} no_fbc_reason;
bool (*fbc_enabled)(struct drm_i915_private *dev_priv); bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
void (*enable_fbc)(struct intel_crtc *crtc); void (*enable_fbc)(struct intel_crtc *crtc);
...@@ -1019,7 +999,7 @@ struct intel_gmbus { ...@@ -1019,7 +999,7 @@ struct intel_gmbus {
struct i2c_adapter adapter; struct i2c_adapter adapter;
u32 force_bit; u32 force_bit;
u32 reg0; u32 reg0;
u32 gpio_reg; i915_reg_t gpio_reg;
struct i2c_algo_bit_data bit_algo; struct i2c_algo_bit_data bit_algo;
struct drm_i915_private *dev_priv; struct drm_i915_private *dev_priv;
}; };
...@@ -1668,7 +1648,7 @@ struct i915_frontbuffer_tracking { ...@@ -1668,7 +1648,7 @@ struct i915_frontbuffer_tracking {
}; };
struct i915_wa_reg { struct i915_wa_reg {
u32 addr; i915_reg_t addr;
u32 value; u32 value;
/* bitmask representing WA bits */ /* bitmask representing WA bits */
u32 mask; u32 mask;
...@@ -1697,6 +1677,13 @@ struct i915_execbuffer_params { ...@@ -1697,6 +1677,13 @@ struct i915_execbuffer_params {
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
}; };
/* used in computing the new watermarks state */
struct intel_wm_config {
unsigned int num_pipes_active;
bool sprites_enabled;
bool sprites_scaled;
};
struct drm_i915_private { struct drm_i915_private {
struct drm_device *dev; struct drm_device *dev;
struct kmem_cache *objects; struct kmem_cache *objects;
...@@ -1717,9 +1704,6 @@ struct drm_i915_private { ...@@ -1717,9 +1704,6 @@ struct drm_i915_private {
struct intel_csr csr; struct intel_csr csr;
/* Display CSR-related protection */
struct mutex csr_lock;
struct intel_gmbus gmbus[GMBUS_NUM_PINS]; struct intel_gmbus gmbus[GMBUS_NUM_PINS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus /** gmbus_mutex protects against concurrent usage of the single hw gmbus
...@@ -1734,6 +1718,8 @@ struct drm_i915_private { ...@@ -1734,6 +1718,8 @@ struct drm_i915_private {
/* MMIO base address for MIPI regs */ /* MMIO base address for MIPI regs */
uint32_t mipi_mmio_base; uint32_t mipi_mmio_base;
uint32_t psr_mmio_base;
wait_queue_head_t gmbus_wait_queue; wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev; struct pci_dev *bridge_dev;
...@@ -1921,6 +1907,9 @@ struct drm_i915_private { ...@@ -1921,6 +1907,9 @@ struct drm_i915_private {
*/ */
uint16_t skl_latency[8]; uint16_t skl_latency[8];
/* Committed wm config */
struct intel_wm_config config;
/* /*
* The skl_wm_values structure is a bit too big for stack * The skl_wm_values structure is a bit too big for stack
* allocation, so we keep the staging struct where we store * allocation, so we keep the staging struct where we store
...@@ -2435,6 +2424,15 @@ struct drm_i915_cmd_table { ...@@ -2435,6 +2424,15 @@ struct drm_i915_cmd_table {
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) #define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
#define REVID_FOREVER 0xff
/*
* Return true if revision is in range [since,until] inclusive.
*
* Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
*/
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
#define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) #define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577)
#define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) #define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
...@@ -2461,7 +2459,8 @@ struct drm_i915_cmd_table { ...@@ -2461,7 +2459,8 @@ struct drm_i915_cmd_table {
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) #define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) #define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
#define IS_BROXTON(dev) (!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev)) #define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00) (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
...@@ -2496,16 +2495,21 @@ struct drm_i915_cmd_table { ...@@ -2496,16 +2495,21 @@ struct drm_i915_cmd_table {
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
#define SKL_REVID_A0 (0x0) #define SKL_REVID_A0 0x0
#define SKL_REVID_B0 (0x1) #define SKL_REVID_B0 0x1
#define SKL_REVID_C0 (0x2) #define SKL_REVID_C0 0x2
#define SKL_REVID_D0 (0x3) #define SKL_REVID_D0 0x3
#define SKL_REVID_E0 (0x4) #define SKL_REVID_E0 0x4
#define SKL_REVID_F0 (0x5) #define SKL_REVID_F0 0x5
#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
#define BXT_REVID_A0 0x0
#define BXT_REVID_A1 0x1
#define BXT_REVID_B0 0x3
#define BXT_REVID_C0 0x9
#define BXT_REVID_A0 (0x0) #define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
#define BXT_REVID_B0 (0x3)
#define BXT_REVID_C0 (0x9)
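As a quick sketch of how the revision-range helpers above are meant to be used, a workaround gate that previously compared INTEL_REVID() by hand can now be written like this (illustrative only; the apply_wa_*() helpers are placeholders, not functions from this patch):

	/* Hypothetical WA gate: open-ended "since", bounded "until". */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0))
		apply_wa_foo(dev_priv);		/* placeholder */

	/* Broxton: everything up to and including the A1 stepping. */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		apply_wa_bar(dev_priv);		/* placeholder */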
/* /*
* The genX designation typically refers to the render engine, so render * The genX designation typically refers to the render engine, so render
...@@ -2577,10 +2581,10 @@ struct drm_i915_cmd_table { ...@@ -2577,10 +2581,10 @@ struct drm_i915_cmd_table {
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \ IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
IS_SKYLAKE(dev)) IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \ IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
IS_SKYLAKE(dev)) IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
...@@ -2640,6 +2644,7 @@ struct i915_params { ...@@ -2640,6 +2644,7 @@ struct i915_params {
int panel_use_ssc; int panel_use_ssc;
int vbt_sdvo_panel_type; int vbt_sdvo_panel_type;
int enable_rc6; int enable_rc6;
int enable_dc;
int enable_fbc; int enable_fbc;
int enable_ppgtt; int enable_ppgtt;
int enable_execlists; int enable_execlists;
...@@ -2688,7 +2693,6 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); ...@@ -2688,7 +2693,6 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
void i915_firmware_load_error_print(const char *fw_path, int err);
/* intel_hotplug.c */ /* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
...@@ -2995,8 +2999,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); ...@@ -2995,8 +2999,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment, u32 alignment,
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request,
const struct i915_ggtt_view *view); const struct i915_ggtt_view *view);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view); const struct i915_ggtt_view *view);
...@@ -3351,7 +3353,6 @@ extern void intel_set_rps(struct drm_device *dev, u8 val); ...@@ -3351,7 +3353,6 @@ extern void intel_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable); bool enable);
extern void intel_detect_pch(struct drm_device *dev); extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev); extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev); extern bool i915_semaphore_is_enabled(struct drm_device *dev);
...@@ -3434,6 +3435,32 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); ...@@ -3434,6 +3435,32 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
#define __raw_read(x, s) \
static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
i915_reg_t reg) \
{ \
return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
#define __raw_write(x, s) \
static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
i915_reg_t reg, uint##x##_t val) \
{ \
write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)
__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)
#undef __raw_read
#undef __raw_write
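For readers unfamiliar with the token-pasting pattern above, each __raw_read()/__raw_write() invocation stamps out one typed accessor. For example, __raw_read(32, l) expands to the following (a sketch of the preprocessor expansion, not additional code):

	static inline uint32_t __raw_i915_read32(struct drm_i915_private *dev_priv,
						 i915_reg_t reg)
	{
		/* i915_mmio_reg_offset() unwraps the typesafe i915_reg_t to its u32 offset. */
		return readl(dev_priv->regs + i915_mmio_reg_offset(reg));
	}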
/* These are untraced mmio-accessors that are only valid to be used inside /* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections inside IRQ handlers where forcewake is explicitly * critical sections inside IRQ handlers where forcewake is explicitly
* controlled. * controlled.
...@@ -3441,8 +3468,8 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); ...@@ -3441,8 +3468,8 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
* Note: Should only be used between intel_uncore_forcewake_irqlock() and * Note: Should only be used between intel_uncore_forcewake_irqlock() and
* intel_uncore_forcewake_irqunlock(). * intel_uncore_forcewake_irqunlock().
*/ */
#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__)) #define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__)) #define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) #define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
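A minimal sketch of the intended call pattern, using the bracketing helpers named in the comment above (their exact signatures are assumed here for illustration):

	/* Sketch only: fast, untraced MMIO inside an IRQ critical section. */
	intel_uncore_forcewake_irqlock(dev_priv);	/* signature assumed */

	u32 iir = I915_READ_FW(GEN8_GT_IIR(0));		/* no tracing, no implicit forcewake */
	if (iir)
		I915_WRITE_FW(GEN8_GT_IIR(0), iir);
	POSTING_READ_FW(GEN8_GT_IIR(0));		/* flush the write */

	intel_uncore_forcewake_irqunlock(dev_priv);	/* signature assumed */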
/* "Broadcast RGB" property */ /* "Broadcast RGB" property */
...@@ -3450,7 +3477,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); ...@@ -3450,7 +3477,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define INTEL_BROADCAST_RGB_FULL 1 #define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2 #define INTEL_BROADCAST_RGB_LIMITED 2
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
{ {
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev))
return VLV_VGACNTRL; return VLV_VGACNTRL;
......
...@@ -2737,6 +2737,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, ...@@ -2737,6 +2737,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
struct intel_engine_cs *ring) struct intel_engine_cs *ring)
{ {
struct intel_ringbuffer *buffer;
while (!list_empty(&ring->active_list)) { while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
...@@ -2752,18 +2754,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, ...@@ -2752,18 +2754,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
* are the ones that keep the context and ringbuffer backing objects * are the ones that keep the context and ringbuffer backing objects
* pinned in place. * pinned in place.
*/ */
while (!list_empty(&ring->execlist_queue)) {
struct drm_i915_gem_request *submit_req;
submit_req = list_first_entry(&ring->execlist_queue, if (i915.enable_execlists) {
struct drm_i915_gem_request, spin_lock_irq(&ring->execlist_lock);
execlist_link); while (!list_empty(&ring->execlist_queue)) {
list_del(&submit_req->execlist_link); struct drm_i915_gem_request *submit_req;
submit_req = list_first_entry(&ring->execlist_queue,
struct drm_i915_gem_request,
execlist_link);
list_del(&submit_req->execlist_link);
if (submit_req->ctx != ring->default_context) if (submit_req->ctx != ring->default_context)
intel_lr_context_unpin(submit_req); intel_lr_context_unpin(submit_req);
i915_gem_request_unreference(submit_req); i915_gem_request_unreference(submit_req);
}
spin_unlock_irq(&ring->execlist_lock);
} }
/* /*
...@@ -2782,6 +2789,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, ...@@ -2782,6 +2789,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
i915_gem_request_retire(request); i915_gem_request_retire(request);
} }
/* Having flushed all requests from all queues, we know that all
* ringbuffers must now be empty. However, since we do not reclaim
* all space when retiring the request (to prevent HEADs colliding
* with rapid ringbuffer wraparound) the amount of available space
* upon reset is less than when we start. Do one more pass over
* all the ringbuffers to reset last_retired_head.
*/
list_for_each_entry(buffer, &ring->buffers, link) {
buffer->last_retired_head = buffer->tail;
intel_ring_update_space(buffer);
}
} }
void i915_gem_reset(struct drm_device *dev) void i915_gem_reset(struct drm_device *dev)
...@@ -3826,7 +3845,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, ...@@ -3826,7 +3845,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
* cacheline, whereas normally such cachelines would get * cacheline, whereas normally such cachelines would get
* invalidated. * invalidated.
*/ */
if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0) if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
return -ENODEV; return -ENODEV;
level = I915_CACHE_LLC; level = I915_CACHE_LLC;
...@@ -3869,17 +3888,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, ...@@ -3869,17 +3888,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
int int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment, u32 alignment,
struct intel_engine_cs *pipelined,
struct drm_i915_gem_request **pipelined_request,
const struct i915_ggtt_view *view) const struct i915_ggtt_view *view)
{ {
u32 old_read_domains, old_write_domain; u32 old_read_domains, old_write_domain;
int ret; int ret;
ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
if (ret)
return ret;
/* Mark the pin_display early so that we account for the /* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains. * display coherency whilst setting up the cache domains.
*/ */
...@@ -4476,10 +4489,8 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, ...@@ -4476,10 +4489,8 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
{ {
struct i915_vma *vma; struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link) { list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) && if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) vma->vm == vm)
continue;
if (vma->vm == vm)
return vma; return vma;
} }
return NULL; return NULL;
...@@ -4568,7 +4579,6 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice) ...@@ -4568,7 +4579,6 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
struct intel_engine_cs *ring = req->ring; struct intel_engine_cs *ring = req->ring;
struct drm_device *dev = ring->dev; struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
u32 *remap_info = dev_priv->l3_parity.remap_info[slice]; u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
int i, ret; int i, ret;
...@@ -4584,10 +4594,10 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice) ...@@ -4584,10 +4594,10 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
* here because no other code should access these registers other than * here because no other code should access these registers other than
* at initialization time. * at initialization time.
*/ */
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, reg_base + i); intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
intel_ring_emit(ring, remap_info[i/4]); intel_ring_emit(ring, remap_info[i]);
} }
intel_ring_advance(ring); intel_ring_advance(ring);
...@@ -4755,18 +4765,9 @@ i915_gem_init_hw(struct drm_device *dev) ...@@ -4755,18 +4765,9 @@ i915_gem_init_hw(struct drm_device *dev)
if (HAS_GUC_UCODE(dev)) { if (HAS_GUC_UCODE(dev)) {
ret = intel_guc_ucode_load(dev); ret = intel_guc_ucode_load(dev);
if (ret) { if (ret) {
/* DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
* If we got an error and GuC submission is enabled, map ret = -EIO;
* the error to -EIO so the GPU will be declared wedged. goto out;
* OTOH, if we didn't intend to use the GuC anyway, just
* discard the error and carry on.
*/
DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
i915.enable_guc_submission ? "" :
" (ignored)");
ret = i915.enable_guc_submission ? -EIO : 0;
if (ret)
goto out;
} }
} }
......
...@@ -556,7 +556,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) ...@@ -556,7 +556,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
if (signaller == ring) if (signaller == ring)
continue; continue;
intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base)); intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
} }
} }
...@@ -581,7 +581,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) ...@@ -581,7 +581,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
if (signaller == ring) if (signaller == ring)
continue; continue;
intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base)); intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
} }
} }
...@@ -925,6 +925,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, ...@@ -925,6 +925,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_NO_ZEROMAP:
args->value = ctx->flags & CONTEXT_NO_ZEROMAP; args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
break; break;
case I915_CONTEXT_PARAM_GTT_SIZE:
if (ctx->ppgtt)
args->value = ctx->ppgtt->base.total;
else if (to_i915(dev)->mm.aliasing_ppgtt)
args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
else
args->value = to_i915(dev)->gtt.base.total;
break;
default: default:
ret = -EINVAL; ret = -EINVAL;
break; break;
......
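The new I915_CONTEXT_PARAM_GTT_SIZE case above lets userspace query how much GTT address space a context can actually use. A rough libdrm-style sketch of reading it (error handling trimmed; assumes libdrm's drmIoctl() and the i915 uapi header):

	#include <stdint.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	static uint64_t query_gtt_size(int fd, uint32_t ctx_id)
	{
		struct drm_i915_gem_context_param p = {
			.ctx_id = ctx_id,
			.param  = I915_CONTEXT_PARAM_GTT_SIZE,
		};

		/* 0 on success; p.value then holds the usable GTT size in bytes. */
		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
			return 0;

		return p.value;
	}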
...@@ -1114,7 +1114,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, ...@@ -1114,7 +1114,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i)); intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
intel_ring_emit(ring, 0); intel_ring_emit(ring, 0);
} }
...@@ -1241,7 +1241,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, ...@@ -1241,7 +1241,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, INSTPM); intel_ring_emit_reg(ring, INSTPM);
intel_ring_emit(ring, instp_mask << 16 | instp_mode); intel_ring_emit(ring, instp_mask << 16 | instp_mode);
intel_ring_advance(ring); intel_ring_advance(ring);
......
...@@ -59,7 +59,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, ...@@ -59,7 +59,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj) struct drm_i915_gem_object *obj)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int fence_reg_lo, fence_reg_hi; i915_reg_t fence_reg_lo, fence_reg_hi;
int fence_pitch_shift; int fence_pitch_shift;
if (INTEL_INFO(dev)->gen >= 6) { if (INTEL_INFO(dev)->gen >= 6) {
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
*/ */
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/i915_drm.h> #include <drm/i915_drm.h>
#include "i915_drv.h" #include "i915_drv.h"
...@@ -104,9 +105,11 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) ...@@ -104,9 +105,11 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{ {
bool has_aliasing_ppgtt; bool has_aliasing_ppgtt;
bool has_full_ppgtt; bool has_full_ppgtt;
bool has_full_48bit_ppgtt;
has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9;
if (intel_vgpu_active(dev)) if (intel_vgpu_active(dev))
has_full_ppgtt = false; /* emulation is too hard */ has_full_ppgtt = false; /* emulation is too hard */
...@@ -125,6 +128,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) ...@@ -125,6 +128,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
if (enable_ppgtt == 2 && has_full_ppgtt) if (enable_ppgtt == 2 && has_full_ppgtt)
return 2; return 2;
if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
return 3;
#ifdef CONFIG_INTEL_IOMMU #ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */ /* Disable ppgtt on SNB if VT-d is on. */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
...@@ -141,7 +147,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) ...@@ -141,7 +147,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
} }
if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
return 2; return has_full_48bit_ppgtt ? 3 : 2;
else else
return has_aliasing_ppgtt ? 1 : 0; return has_aliasing_ppgtt ? 1 : 0;
} }
...@@ -661,10 +667,10 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req, ...@@ -661,10 +667,10 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
return ret; return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry)); intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
intel_ring_emit(ring, upper_32_bits(addr)); intel_ring_emit(ring, upper_32_bits(addr));
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry)); intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
intel_ring_emit(ring, lower_32_bits(addr)); intel_ring_emit(ring, lower_32_bits(addr));
intel_ring_advance(ring); intel_ring_advance(ring);
...@@ -904,14 +910,13 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) ...@@ -904,14 +910,13 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
enum vgt_g2v_type msg; enum vgt_g2v_type msg;
struct drm_device *dev = ppgtt->base.dev; struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int offset = vgtif_reg(pdp0_lo);
int i; int i;
if (USES_FULL_48BIT_PPGTT(dev)) { if (USES_FULL_48BIT_PPGTT(dev)) {
u64 daddr = px_dma(&ppgtt->pml4); u64 daddr = px_dma(&ppgtt->pml4);
I915_WRITE(offset, lower_32_bits(daddr)); I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
I915_WRITE(offset + 4, upper_32_bits(daddr)); I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
...@@ -919,10 +924,8 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) ...@@ -919,10 +924,8 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
for (i = 0; i < GEN8_LEGACY_PDPES; i++) { for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
u64 daddr = i915_page_dir_dma_addr(ppgtt, i); u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
I915_WRITE(offset, lower_32_bits(daddr)); I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
I915_WRITE(offset + 4, upper_32_bits(daddr)); I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
offset += 8;
} }
msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
...@@ -1662,9 +1665,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, ...@@ -1662,9 +1665,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
return ret; return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit(ring, RING_PP_DIR_DCLV(ring)); intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit(ring, PP_DIR_DCLV_2G); intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit(ring, RING_PP_DIR_BASE(ring)); intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit(ring, get_pd_offset(ppgtt)); intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring); intel_ring_advance(ring);
...@@ -1699,9 +1702,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, ...@@ -1699,9 +1702,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
return ret; return ret;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
intel_ring_emit(ring, RING_PP_DIR_DCLV(ring)); intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
intel_ring_emit(ring, PP_DIR_DCLV_2G); intel_ring_emit(ring, PP_DIR_DCLV_2G);
intel_ring_emit(ring, RING_PP_DIR_BASE(ring)); intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
intel_ring_emit(ring, get_pd_offset(ppgtt)); intel_ring_emit(ring, get_pd_offset(ppgtt));
intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring); intel_ring_advance(ring);
...@@ -2528,6 +2531,26 @@ static int ggtt_bind_vma(struct i915_vma *vma, ...@@ -2528,6 +2531,26 @@ static int ggtt_bind_vma(struct i915_vma *vma,
return 0; return 0;
} }
struct ggtt_bind_vma__cb {
struct i915_vma *vma;
enum i915_cache_level cache_level;
u32 flags;
};
static int ggtt_bind_vma__cb(void *_arg)
{
struct ggtt_bind_vma__cb *arg = _arg;
return ggtt_bind_vma(arg->vma, arg->cache_level, arg->flags);
}
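/*
 * ggtt_bind_vma__BKL() below funnels the GGTT PTE update through
 * stop_machine(), so no other CPU runs while the entries are rewritten.
 * It is only installed on Cherryview (see gen8_gmch_probe() further
 * down), presumably as a big-hammer workaround for PTE updates racing
 * with concurrent accesses on that platform; the hardware rationale is
 * not spelled out in this hunk.
 */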
static int ggtt_bind_vma__BKL(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
struct ggtt_bind_vma__cb arg = { vma, cache_level, flags };
return stop_machine(ggtt_bind_vma__cb, &arg, NULL);
}
static int aliasing_gtt_bind_vma(struct i915_vma *vma, static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level, enum i915_cache_level cache_level,
u32 flags) u32 flags)
...@@ -2995,6 +3018,9 @@ static int gen8_gmch_probe(struct drm_device *dev, ...@@ -2995,6 +3018,9 @@ static int gen8_gmch_probe(struct drm_device *dev,
dev_priv->gtt.base.bind_vma = ggtt_bind_vma; dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma; dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
if (IS_CHERRYVIEW(dev))
dev_priv->gtt.base.bind_vma = ggtt_bind_vma__BKL;
return ret; return ret;
} }
...@@ -3302,7 +3328,7 @@ static struct sg_table * ...@@ -3302,7 +3328,7 @@ static struct sg_table *
intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
struct drm_i915_gem_object *obj) struct drm_i915_gem_object *obj)
{ {
struct intel_rotation_info *rot_info = &ggtt_view->rotation_info; struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
unsigned int size_pages = rot_info->size >> PAGE_SHIFT; unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
unsigned int size_pages_uv; unsigned int size_pages_uv;
struct sg_page_iter sg_iter; struct sg_page_iter sg_iter;
...@@ -3534,7 +3560,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj, ...@@ -3534,7 +3560,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
if (view->type == I915_GGTT_VIEW_NORMAL) { if (view->type == I915_GGTT_VIEW_NORMAL) {
return obj->base.size; return obj->base.size;
} else if (view->type == I915_GGTT_VIEW_ROTATED) { } else if (view->type == I915_GGTT_VIEW_ROTATED) {
return view->rotation_info.size; return view->params.rotation_info.size;
} else if (view->type == I915_GGTT_VIEW_PARTIAL) { } else if (view->type == I915_GGTT_VIEW_PARTIAL) {
return view->params.partial.size << PAGE_SHIFT; return view->params.partial.size << PAGE_SHIFT;
} else { } else {
......
...@@ -156,13 +156,10 @@ struct i915_ggtt_view { ...@@ -156,13 +156,10 @@ struct i915_ggtt_view {
u64 offset; u64 offset;
unsigned int size; unsigned int size;
} partial; } partial;
struct intel_rotation_info rotation_info;
} params; } params;
struct sg_table *pages; struct sg_table *pages;
union {
struct intel_rotation_info rotation_info;
};
}; };
extern const struct i915_ggtt_view i915_ggtt_view_normal; extern const struct i915_ggtt_view i915_ggtt_view_normal;
...@@ -556,7 +553,7 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a, ...@@ -556,7 +553,7 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
if (a->type != b->type) if (a->type != b->type)
return false; return false;
if (a->type == I915_GGTT_VIEW_PARTIAL) if (a->type != I915_GGTT_VIEW_NORMAL)
return !memcmp(&a->params, &b->params, sizeof(a->params)); return !memcmp(&a->params, &b->params, sizeof(a->params));
return true; return true;
} }
......
...@@ -433,7 +433,8 @@ int i915_gem_init_stolen(struct drm_device *dev) ...@@ -433,7 +433,8 @@ int i915_gem_init_stolen(struct drm_device *dev)
&reserved_size); &reserved_size);
break; break;
default: default:
if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv)) if (IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev))
bdw_get_stolen_reserved(dev_priv, &reserved_base, bdw_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size); &reserved_size);
else else
......
...@@ -176,6 +176,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -176,6 +176,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
return -EINVAL; return -EINVAL;
} }
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
if (obj->pin_display || obj->framebuffer_references) { if (obj->pin_display || obj->framebuffer_references) {
ret = -EBUSY; ret = -EBUSY;
...@@ -269,6 +271,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -269,6 +271,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
drm_gem_object_unreference(&obj->base); drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
intel_runtime_pm_put(dev_priv);
return ret; return ret;
} }
......
...@@ -366,6 +366,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, ...@@ -366,6 +366,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Suspend count: %u\n", error->suspend_count); err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device); err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu); err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
if (HAS_CSR(dev)) {
struct intel_csr *csr = &dev_priv->csr;
err_printf(m, "DMC loaded: %s\n",
yesno(csr->dmc_payload != NULL));
err_printf(m, "DMC fw version: %d.%d\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
}
err_printf(m, "EIR: 0x%08x\n", error->eir); err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier); err_printf(m, "IER: 0x%08x\n", error->ier);
if (INTEL_INFO(dev)->gen >= 8) { if (INTEL_INFO(dev)->gen >= 8) {
...@@ -862,7 +873,7 @@ static void i915_record_ring_state(struct drm_device *dev, ...@@ -862,7 +873,7 @@ static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 6) { if (INTEL_INFO(dev)->gen >= 6) {
ering->rc_psmi = I915_READ(ring->mmio_base + 0x50); ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
ering->fault_reg = I915_READ(RING_FAULT_REG(ring)); ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
if (INTEL_INFO(dev)->gen >= 8) if (INTEL_INFO(dev)->gen >= 8)
gen8_record_semaphore_state(dev_priv, error, ring, ering); gen8_record_semaphore_state(dev_priv, error, ring, ering);
...@@ -899,7 +910,7 @@ static void i915_record_ring_state(struct drm_device *dev, ...@@ -899,7 +910,7 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->ctl = I915_READ_CTL(ring); ering->ctl = I915_READ_CTL(ring);
if (I915_NEED_GFX_HWS(dev)) { if (I915_NEED_GFX_HWS(dev)) {
int mmio; i915_reg_t mmio;
if (IS_GEN7(dev)) { if (IS_GEN7(dev)) {
switch (ring->id) { switch (ring->id) {
...@@ -1071,6 +1082,25 @@ static void i915_gem_record_rings(struct drm_device *dev, ...@@ -1071,6 +1082,25 @@ static void i915_gem_record_rings(struct drm_device *dev,
list_for_each_entry(request, &ring->request_list, list) { list_for_each_entry(request, &ring->request_list, list) {
struct drm_i915_error_request *erq; struct drm_i915_error_request *erq;
if (count >= error->ring[i].num_requests) {
/*
* If the ring request list was changed in
* between the point where the error request
* list was created and dimensioned and this
* point then just exit early to avoid crashes.
*
* We don't need to communicate that the
* request list changed state during error
* state capture and that the error state is
* slightly incorrect as a consequence since we
* are typically only interested in the request
* list state at the point of error state
* capture, not in any changes happening during
* the capture.
*/
break;
}
erq = &error->ring[i].requests[count++]; erq = &error->ring[i].requests[count++];
erq->seqno = request->seqno; erq->seqno = request->seqno;
erq->jiffies = request->emitted_jiffies; erq->jiffies = request->emitted_jiffies;
...@@ -1181,7 +1211,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, ...@@ -1181,7 +1211,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
if (IS_VALLEYVIEW(dev)) { if (IS_VALLEYVIEW(dev)) {
error->gtier[0] = I915_READ(GTIER); error->gtier[0] = I915_READ(GTIER);
error->ier = I915_READ(VLV_IER); error->ier = I915_READ(VLV_IER);
error->forcewake = I915_READ(FORCEWAKE_VLV); error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
} }
if (IS_GEN7(dev)) if (IS_GEN7(dev))
...@@ -1193,14 +1223,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, ...@@ -1193,14 +1223,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
} }
if (IS_GEN6(dev)) { if (IS_GEN6(dev)) {
error->forcewake = I915_READ(FORCEWAKE); error->forcewake = I915_READ_FW(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL); error->gab_ctl = I915_READ(GAB_CTL);
error->gfx_mode = I915_READ(GFX_MODE); error->gfx_mode = I915_READ(GFX_MODE);
} }
/* 2: Registers which belong to multiple generations */ /* 2: Registers which belong to multiple generations */
if (INTEL_INFO(dev)->gen >= 7) if (INTEL_INFO(dev)->gen >= 7)
error->forcewake = I915_READ(FORCEWAKE_MT); error->forcewake = I915_READ_FW(FORCEWAKE_MT);
if (INTEL_INFO(dev)->gen >= 6) { if (INTEL_INFO(dev)->gen >= 6) {
error->derrmr = I915_READ(DERRMR); error->derrmr = I915_READ(DERRMR);
......
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
/* Definitions of GuC H/W registers, bits, etc */ /* Definitions of GuC H/W registers, bits, etc */
#define GUC_STATUS 0xc000 #define GUC_STATUS _MMIO(0xc000)
#define GS_BOOTROM_SHIFT 1 #define GS_BOOTROM_SHIFT 1
#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT) #define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT)
#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT) #define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT)
...@@ -39,40 +39,41 @@ ...@@ -39,40 +39,41 @@
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) #define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT) #define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
#define SOFT_SCRATCH(n) (0xc180 + ((n) * 4)) #define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
#define UOS_RSA_SCRATCH(i) (0xc200 + (i) * 4) #define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4)
#define DMA_ADDR_0_LOW 0xc300 #define UOS_RSA_SCRATCH_MAX_COUNT 64
#define DMA_ADDR_0_HIGH 0xc304 #define DMA_ADDR_0_LOW _MMIO(0xc300)
#define DMA_ADDR_1_LOW 0xc308 #define DMA_ADDR_0_HIGH _MMIO(0xc304)
#define DMA_ADDR_1_HIGH 0xc30c #define DMA_ADDR_1_LOW _MMIO(0xc308)
#define DMA_ADDR_1_HIGH _MMIO(0xc30c)
#define DMA_ADDRESS_SPACE_WOPCM (7 << 16) #define DMA_ADDRESS_SPACE_WOPCM (7 << 16)
#define DMA_ADDRESS_SPACE_GTT (8 << 16) #define DMA_ADDRESS_SPACE_GTT (8 << 16)
#define DMA_COPY_SIZE 0xc310 #define DMA_COPY_SIZE _MMIO(0xc310)
#define DMA_CTRL 0xc314 #define DMA_CTRL _MMIO(0xc314)
#define UOS_MOVE (1<<4) #define UOS_MOVE (1<<4)
#define START_DMA (1<<0) #define START_DMA (1<<0)
#define DMA_GUC_WOPCM_OFFSET 0xc340 #define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340)
#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ #define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
#define GUC_MAX_IDLE_COUNT 0xC3E4 #define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
#define GUC_WOPCM_SIZE 0xc050 #define GUC_WOPCM_SIZE _MMIO(0xc050)
#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */ #define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ /* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE) #define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE)
#define GEN8_GT_PM_CONFIG 0x138140 #define GEN8_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9LP_GT_PM_CONFIG 0x138140 #define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9_GT_PM_CONFIG 0x13816c #define GEN9_GT_PM_CONFIG _MMIO(0x13816c)
#define GT_DOORBELL_ENABLE (1<<0) #define GT_DOORBELL_ENABLE (1<<0)
#define GEN8_GTCR 0x4274 #define GEN8_GTCR _MMIO(0x4274)
#define GEN8_GTCR_INVALIDATE (1<<0) #define GEN8_GTCR_INVALIDATE (1<<0)
#define GUC_ARAT_C6DIS 0xA178 #define GUC_ARAT_C6DIS _MMIO(0xA178)
#define GUC_SHIM_CONTROL 0xc064 #define GUC_SHIM_CONTROL _MMIO(0xc064)
#define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0) #define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0)
#define GUC_ENABLE_READ_CACHE_LOGIC (1<<1) #define GUC_ENABLE_READ_CACHE_LOGIC (1<<1)
#define GUC_ENABLE_MIA_CACHING (1<<2) #define GUC_ENABLE_MIA_CACHING (1<<2)
...@@ -89,21 +90,21 @@ ...@@ -89,21 +90,21 @@
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \ GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \
GUC_ENABLE_MIA_CLOCK_GATING) GUC_ENABLE_MIA_CLOCK_GATING)
#define HOST2GUC_INTERRUPT 0xc4c8 #define HOST2GUC_INTERRUPT _MMIO(0xc4c8)
#define HOST2GUC_TRIGGER (1<<0) #define HOST2GUC_TRIGGER (1<<0)
#define DRBMISC1 0x1984 #define DRBMISC1 0x1984
#define DOORBELL_ENABLE (1<<0) #define DOORBELL_ENABLE (1<<0)
#define GEN8_DRBREGL(x) (0x1000 + (x) * 8) #define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
#define GEN8_DRB_VALID (1<<0) #define GEN8_DRB_VALID (1<<0)
#define GEN8_DRBREGU(x) (GEN8_DRBREGL(x) + 4) #define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4)
#define DE_GUCRMR 0x44054 #define DE_GUCRMR _MMIO(0x44054)
#define GUC_BCS_RCS_IER 0xC550 #define GUC_BCS_RCS_IER _MMIO(0xC550)
#define GUC_VCS2_VCS1_IER 0xC554 #define GUC_VCS2_VCS1_IER _MMIO(0xC554)
#define GUC_WD_VECS_IER 0xC558 #define GUC_WD_VECS_IER _MMIO(0xC558)
#define GUC_PM_P24C_IER 0xC55C #define GUC_PM_P24C_IER _MMIO(0xC55C)
#endif #endif
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
#include "intel_guc.h" #include "intel_guc.h"
/** /**
* DOC: GuC Client * DOC: GuC-based command submission
* *
* i915_guc_client: * i915_guc_client:
 * We use the term client to avoid confusion with contexts. An i915_guc_client is * We use the term client to avoid confusion with contexts. An i915_guc_client is
...@@ -161,9 +161,9 @@ static int host2guc_sample_forcewake(struct intel_guc *guc, ...@@ -161,9 +161,9 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
/* WaRsDisableCoarsePowerGating:skl,bxt */ /* WaRsDisableCoarsePowerGating:skl,bxt */
if (!intel_enable_rc6(dev_priv->dev) || if (!intel_enable_rc6(dev_priv->dev) ||
(IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
(IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) || (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
(IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0))) (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
data[1] = 0; data[1] = 0;
else else
/* bit 0 and 1 are for Render and Media domain separately */ /* bit 0 and 1 are for Render and Media domain separately */
...@@ -258,7 +258,7 @@ static void guc_disable_doorbell(struct intel_guc *guc, ...@@ -258,7 +258,7 @@ static void guc_disable_doorbell(struct intel_guc *guc,
struct drm_i915_private *dev_priv = guc_to_i915(guc); struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct guc_doorbell_info *doorbell; struct guc_doorbell_info *doorbell;
void *base; void *base;
int drbreg = GEN8_DRBREGL(client->doorbell_id); i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
int value; int value;
base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0)); base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
...@@ -588,8 +588,7 @@ static void lr_context_update(struct drm_i915_gem_request *rq) ...@@ -588,8 +588,7 @@ static void lr_context_update(struct drm_i915_gem_request *rq)
/** /**
* i915_guc_submit() - Submit commands through GuC * i915_guc_submit() - Submit commands through GuC
* @client: the guc client where commands will go through * @client: the guc client where commands will go through
* @ctx: LRC where commands come from * @rq: request associated with the commands
* @ring: HW engine that will excute the commands
* *
 * Return: 0 if successful * Return: 0 if successful
*/ */
...@@ -731,7 +730,8 @@ static void guc_client_free(struct drm_device *dev, ...@@ -731,7 +730,8 @@ static void guc_client_free(struct drm_device *dev,
* The kernel client to replace ExecList submission is created with * The kernel client to replace ExecList submission is created with
* NORMAL priority. Priority of a client for scheduler can be HIGH, * NORMAL priority. Priority of a client for scheduler can be HIGH,
* while a preemption context can use CRITICAL. * while a preemption context can use CRITICAL.
* @ctx the context to own the client (we use the default render context) * @ctx: the context that owns the client (we use the default render
* context)
* *
* Return: An i915_guc_client object if success. * Return: An i915_guc_client object if success.
*/ */
......
...@@ -139,7 +139,8 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = { ...@@ -139,7 +139,8 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
/* /*
* We should clear IMR at preinstall/uninstall, and just check at postinstall. * We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/ */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg) static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{ {
u32 val = I915_READ(reg); u32 val = I915_READ(reg);
...@@ -147,7 +148,7 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg) ...@@ -147,7 +148,7 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
return; return;
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
reg, val); i915_mmio_reg_offset(reg), val);
I915_WRITE(reg, 0xffffffff); I915_WRITE(reg, 0xffffffff);
POSTING_READ(reg); POSTING_READ(reg);
I915_WRITE(reg, 0xffffffff); I915_WRITE(reg, 0xffffffff);
...@@ -283,17 +284,17 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) ...@@ -283,17 +284,17 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
ilk_update_gt_irq(dev_priv, mask, 0); ilk_update_gt_irq(dev_priv, mask, 0);
} }
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv) static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{ {
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
} }
static u32 gen6_pm_imr(struct drm_i915_private *dev_priv) static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{ {
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
} }
static u32 gen6_pm_ier(struct drm_i915_private *dev_priv) static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{ {
return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER; return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
} }
...@@ -350,7 +351,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) ...@@ -350,7 +351,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
void gen6_reset_rps_interrupts(struct drm_device *dev) void gen6_reset_rps_interrupts(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t reg = gen6_pm_iir(dev_priv); i915_reg_t reg = gen6_pm_iir(dev_priv);
spin_lock_irq(&dev_priv->irq_lock); spin_lock_irq(&dev_priv->irq_lock);
I915_WRITE(reg, dev_priv->pm_rps_events); I915_WRITE(reg, dev_priv->pm_rps_events);
...@@ -477,7 +478,7 @@ static void ...@@ -477,7 +478,7 @@ static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask, u32 status_mask) u32 enable_mask, u32 status_mask)
{ {
u32 reg = PIPESTAT(pipe); i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
assert_spin_locked(&dev_priv->irq_lock); assert_spin_locked(&dev_priv->irq_lock);
...@@ -504,7 +505,7 @@ static void ...@@ -504,7 +505,7 @@ static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 enable_mask, u32 status_mask) u32 enable_mask, u32 status_mask)
{ {
u32 reg = PIPESTAT(pipe); i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
assert_spin_locked(&dev_priv->irq_lock); assert_spin_locked(&dev_priv->irq_lock);
...@@ -665,8 +666,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe) ...@@ -665,8 +666,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long high_frame; i915_reg_t high_frame, low_frame;
unsigned long low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
struct intel_crtc *intel_crtc = struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
...@@ -717,9 +717,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) ...@@ -717,9 +717,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
} }
/* raw reads, only for fast reads of display block, no need for forcewake etc. */ /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
static int __intel_get_crtc_scanline(struct intel_crtc *crtc) static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{ {
struct drm_device *dev = crtc->base.dev; struct drm_device *dev = crtc->base.dev;
...@@ -733,9 +731,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) ...@@ -733,9 +731,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
vtotal /= 2; vtotal /= 2;
if (IS_GEN2(dev)) if (IS_GEN2(dev))
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else else
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
/* /*
* On HSW, the DSL reg (0x70000) appears to return 0 if we * On HSW, the DSL reg (0x70000) appears to return 0 if we
...@@ -827,7 +825,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, ...@@ -827,7 +825,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
* We can split this into vertical and horizontal * We can split this into vertical and horizontal
* scanout position. * scanout position.
*/ */
position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
/* convert to pixel counts */ /* convert to pixel counts */
vbl_start *= htotal; vbl_start *= htotal;
...@@ -1188,7 +1186,7 @@ static void ivybridge_parity_work(struct work_struct *work) ...@@ -1188,7 +1186,7 @@ static void ivybridge_parity_work(struct work_struct *work)
POSTING_READ(GEN7_MISCCPCTL); POSTING_READ(GEN7_MISCCPCTL);
while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
u32 reg; i915_reg_t reg;
slice--; slice--;
if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
...@@ -1196,7 +1194,7 @@ static void ivybridge_parity_work(struct work_struct *work) ...@@ -1196,7 +1194,7 @@ static void ivybridge_parity_work(struct work_struct *work)
dev_priv->l3_parity.which_slice &= ~(1<<slice); dev_priv->l3_parity.which_slice &= ~(1<<slice);
reg = GEN7_L3CDERRST1 + (slice * 0x200); reg = GEN7_L3CDERRST1(slice);
error_status = I915_READ(reg); error_status = I915_READ(reg);
row = GEN7_PARITY_ERROR_ROW(error_status); row = GEN7_PARITY_ERROR_ROW(error_status);
...@@ -1290,70 +1288,69 @@ static void snb_gt_irq_handler(struct drm_device *dev, ...@@ -1290,70 +1288,69 @@ static void snb_gt_irq_handler(struct drm_device *dev,
ivybridge_parity_error_irq_handler(dev, gt_iir); ivybridge_parity_error_irq_handler(dev, gt_iir);
} }
static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
{
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
notify_ring(ring);
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
intel_lrc_irq_handler(ring);
}
static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 master_ctl) u32 master_ctl)
{ {
irqreturn_t ret = IRQ_NONE; irqreturn_t ret = IRQ_NONE;
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
u32 tmp = I915_READ_FW(GEN8_GT_IIR(0)); u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
if (tmp) { if (iir) {
I915_WRITE_FW(GEN8_GT_IIR(0), tmp); I915_WRITE_FW(GEN8_GT_IIR(0), iir);
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT)) gen8_cs_irq_handler(&dev_priv->ring[RCS],
intel_lrc_irq_handler(&dev_priv->ring[RCS]); iir, GEN8_RCS_IRQ_SHIFT);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
notify_ring(&dev_priv->ring[RCS]);
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT)) gen8_cs_irq_handler(&dev_priv->ring[BCS],
intel_lrc_irq_handler(&dev_priv->ring[BCS]); iir, GEN8_BCS_IRQ_SHIFT);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
notify_ring(&dev_priv->ring[BCS]);
} else } else
DRM_ERROR("The master control interrupt lied (GT0)!\n"); DRM_ERROR("The master control interrupt lied (GT0)!\n");
} }
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
u32 tmp = I915_READ_FW(GEN8_GT_IIR(1)); u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
if (tmp) { if (iir) {
I915_WRITE_FW(GEN8_GT_IIR(1), tmp); I915_WRITE_FW(GEN8_GT_IIR(1), iir);
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT)) gen8_cs_irq_handler(&dev_priv->ring[VCS],
intel_lrc_irq_handler(&dev_priv->ring[VCS]); iir, GEN8_VCS1_IRQ_SHIFT);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
notify_ring(&dev_priv->ring[VCS]);
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT)) gen8_cs_irq_handler(&dev_priv->ring[VCS2],
intel_lrc_irq_handler(&dev_priv->ring[VCS2]); iir, GEN8_VCS2_IRQ_SHIFT);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
notify_ring(&dev_priv->ring[VCS2]);
} else } else
DRM_ERROR("The master control interrupt lied (GT1)!\n"); DRM_ERROR("The master control interrupt lied (GT1)!\n");
} }
if (master_ctl & GEN8_GT_VECS_IRQ) { if (master_ctl & GEN8_GT_VECS_IRQ) {
u32 tmp = I915_READ_FW(GEN8_GT_IIR(3)); u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
if (tmp) { if (iir) {
I915_WRITE_FW(GEN8_GT_IIR(3), tmp); I915_WRITE_FW(GEN8_GT_IIR(3), iir);
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)) gen8_cs_irq_handler(&dev_priv->ring[VECS],
intel_lrc_irq_handler(&dev_priv->ring[VECS]); iir, GEN8_VECS_IRQ_SHIFT);
if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
notify_ring(&dev_priv->ring[VECS]);
} else } else
DRM_ERROR("The master control interrupt lied (GT3)!\n"); DRM_ERROR("The master control interrupt lied (GT3)!\n");
} }
if (master_ctl & GEN8_GT_PM_IRQ) { if (master_ctl & GEN8_GT_PM_IRQ) {
u32 tmp = I915_READ_FW(GEN8_GT_IIR(2)); u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
if (tmp & dev_priv->pm_rps_events) { if (iir & dev_priv->pm_rps_events) {
I915_WRITE_FW(GEN8_GT_IIR(2), I915_WRITE_FW(GEN8_GT_IIR(2),
tmp & dev_priv->pm_rps_events); iir & dev_priv->pm_rps_events);
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
gen6_rps_irq_handler(dev_priv, tmp); gen6_rps_irq_handler(dev_priv, iir);
} else } else
DRM_ERROR("The master control interrupt lied (PM)!\n"); DRM_ERROR("The master control interrupt lied (PM)!\n");
} }
...@@ -1625,7 +1622,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) ...@@ -1625,7 +1622,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
spin_lock(&dev_priv->irq_lock); spin_lock(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) { for_each_pipe(dev_priv, pipe) {
int reg; i915_reg_t reg;
u32 mask, iir_bit = 0; u32 mask, iir_bit = 0;
/* /*
...@@ -2354,9 +2351,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) ...@@ -2354,9 +2351,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
spt_irq_handler(dev, pch_iir); spt_irq_handler(dev, pch_iir);
else else
cpt_irq_handler(dev, pch_iir); cpt_irq_handler(dev, pch_iir);
} else } else {
DRM_ERROR("The master control interrupt lied (SDE)!\n"); /*
* Like on previous PCH there seems to be something
* fishy going on with forwarding PCH interrupts.
*/
DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
}
} }
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
...@@ -3869,7 +3870,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) ...@@ -3869,7 +3870,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) { for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe); i915_reg_t reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg); pipe_stats[pipe] = I915_READ(reg);
/* /*
...@@ -4050,7 +4051,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) ...@@ -4050,7 +4051,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) { for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe); i915_reg_t reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg); pipe_stats[pipe] = I915_READ(reg);
/* Clear the PIPE*STAT regs before the IIR */ /* Clear the PIPE*STAT regs before the IIR */
...@@ -4272,7 +4273,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) ...@@ -4272,7 +4273,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
for_each_pipe(dev_priv, pipe) { for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe); i915_reg_t reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg); pipe_stats[pipe] = I915_READ(reg);
/* /*
......
...@@ -32,6 +32,7 @@ struct i915_params i915 __read_mostly = { ...@@ -32,6 +32,7 @@ struct i915_params i915 __read_mostly = {
.panel_use_ssc = -1, .panel_use_ssc = -1,
.vbt_sdvo_panel_type = -1, .vbt_sdvo_panel_type = -1,
.enable_rc6 = -1, .enable_rc6 = -1,
.enable_dc = -1,
.enable_fbc = -1, .enable_fbc = -1,
.enable_execlists = -1, .enable_execlists = -1,
.enable_hangcheck = true, .enable_hangcheck = true,
...@@ -80,6 +81,11 @@ MODULE_PARM_DESC(enable_rc6, ...@@ -80,6 +81,11 @@ MODULE_PARM_DESC(enable_rc6,
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)"); "default: -1 (use per-chip default)");
module_param_named_unsafe(enable_dc, i915.enable_dc, int, 0400);
MODULE_PARM_DESC(enable_dc,
"Enable power-saving display C-states. "
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600); module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
MODULE_PARM_DESC(enable_fbc, MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings " "Enable frame buffer compression for power savings "
...@@ -112,7 +118,7 @@ MODULE_PARM_DESC(enable_hangcheck, ...@@ -112,7 +118,7 @@ MODULE_PARM_DESC(enable_hangcheck,
module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400); module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
MODULE_PARM_DESC(enable_ppgtt, MODULE_PARM_DESC(enable_ppgtt,
"Override PPGTT usage. " "Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400); module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400);
MODULE_PARM_DESC(enable_execlists, MODULE_PARM_DESC(enable_execlists,
...@@ -126,7 +132,7 @@ module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, i ...@@ -126,7 +132,7 @@ module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, i
MODULE_PARM_DESC(preliminary_hw_support, MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support."); "Enable preliminary hardware support.");
module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0600); module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400);
MODULE_PARM_DESC(disable_power_well, MODULE_PARM_DESC(disable_power_well,
"Disable display power wells when possible " "Disable display power wells when possible "
"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
......
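The new i915.enable_dc parameter above documents the -1/0/1/2 semantics for display C-states. As a hypothetical sketch only (the helper name, flag names, and the mapping chosen for "auto" are made up for illustration and are not the driver's actual code), such a value could be turned into a mask of allowed DC states like this:

#include <stdio.h>

/* Illustrative flags; names are invented for this sketch. */
#define ALLOW_DC5 (1 << 0)
#define ALLOW_DC6 (1 << 1)

/* Mirror the documented semantics:
 * -1 = auto (treated as "everything" here), 0 = disable,
 *  1 = up to DC5, 2 = up to DC6. */
static unsigned int allowed_dc_mask(int enable_dc)
{
	switch (enable_dc) {
	case 0:
		return 0;
	case 1:
		return ALLOW_DC5;
	case -1:
	case 2:
	default:
		return ALLOW_DC5 | ALLOW_DC6;
	}
}

int main(void)
{
	int v;

	for (v = -1; v <= 2; v++)
		printf("enable_dc=%2d -> mask=0x%x\n", v, allowed_dc_mask(v));
	return 0;
}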

...@@ -35,7 +35,8 @@ ...@@ -35,7 +35,8 @@
#define dev_to_drm_minor(d) dev_get_drvdata((d)) #define dev_to_drm_minor(d) dev_get_drvdata((d))
#ifdef CONFIG_PM #ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg) static u32 calc_residency(struct drm_device *dev,
i915_reg_t reg)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u64 raw_time; /* 32b value may overflow during fixed point math */ u64 raw_time; /* 32b value may overflow during fixed point math */
......
...@@ -664,7 +664,7 @@ TRACE_EVENT(i915_flip_complete, ...@@ -664,7 +664,7 @@ TRACE_EVENT(i915_flip_complete,
); );
TRACE_EVENT_CONDITION(i915_reg_rw, TRACE_EVENT_CONDITION(i915_reg_rw,
TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace), TP_PROTO(bool write, i915_reg_t reg, u64 val, int len, bool trace),
TP_ARGS(write, reg, val, len, trace), TP_ARGS(write, reg, val, len, trace),
...@@ -679,7 +679,7 @@ TRACE_EVENT_CONDITION(i915_reg_rw, ...@@ -679,7 +679,7 @@ TRACE_EVENT_CONDITION(i915_reg_rw,
TP_fast_assign( TP_fast_assign(
__entry->val = (u64)val; __entry->val = (u64)val;
__entry->reg = reg; __entry->reg = i915_mmio_reg_offset(reg);
__entry->write = write; __entry->write = write;
__entry->len = len; __entry->len = len;
), ),
......
...@@ -69,13 +69,13 @@ void i915_check_vgpu(struct drm_device *dev) ...@@ -69,13 +69,13 @@ void i915_check_vgpu(struct drm_device *dev)
if (!IS_HASWELL(dev)) if (!IS_HASWELL(dev))
return; return;
magic = readq(dev_priv->regs + vgtif_reg(magic)); magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
if (magic != VGT_MAGIC) if (magic != VGT_MAGIC)
return; return;
version = INTEL_VGT_IF_VERSION_ENCODE( version = INTEL_VGT_IF_VERSION_ENCODE(
readw(dev_priv->regs + vgtif_reg(version_major)), __raw_i915_read16(dev_priv, vgtif_reg(version_major)),
readw(dev_priv->regs + vgtif_reg(version_minor))); __raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
if (version != INTEL_VGT_IF_VERSION) { if (version != INTEL_VGT_IF_VERSION) {
DRM_INFO("VGT interface version mismatch!\n"); DRM_INFO("VGT interface version mismatch!\n");
return; return;
......
...@@ -92,14 +92,10 @@ struct vgt_if { ...@@ -92,14 +92,10 @@ struct vgt_if {
uint32_t g2v_notify; uint32_t g2v_notify;
uint32_t rsv6[7]; uint32_t rsv6[7];
uint32_t pdp0_lo; struct {
uint32_t pdp0_hi; uint32_t lo;
uint32_t pdp1_lo; uint32_t hi;
uint32_t pdp1_hi; } pdp[4];
uint32_t pdp2_lo;
uint32_t pdp2_hi;
uint32_t pdp3_lo;
uint32_t pdp3_hi;
uint32_t execlist_context_descriptor_lo; uint32_t execlist_context_descriptor_lo;
uint32_t execlist_context_descriptor_hi; uint32_t execlist_context_descriptor_hi;
...@@ -108,7 +104,7 @@ struct vgt_if { ...@@ -108,7 +104,7 @@ struct vgt_if {
} __packed; } __packed;
#define vgtif_reg(x) \ #define vgtif_reg(x) \
(VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x) _MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x))
/* vGPU display status to be used by the host side */ /* vGPU display status to be used by the host side */
#define VGT_DRV_DISPLAY_NOT_READY 0 #define VGT_DRV_DISPLAY_NOT_READY 0
......
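The vgtif_reg() change above keeps the offsetof-style trick (taking the address of a field through a NULL struct pointer) and merely wraps the result in _MMIO(). A standalone sketch of the same pattern follows; the base address and the trimmed-down struct are illustrative, and offsetof() is used in place of the kernel's NULL-pointer idiom.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Illustrative base of the PVINFO page; the real value lives in the driver. */
#define VGT_PVINFO_PAGE 0x78000

struct vgt_if {
	uint32_t magic;
	uint32_t version_major;
	uint32_t version_minor;
	struct {
		uint32_t lo;
		uint32_t hi;
	} pdp[4];
};

/* Register address = PVINFO page base + offset of the field inside vgt_if.
 * The kernel macro additionally wraps this in _MMIO() for type safety. */
#define vgtif_reg(x) (VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))

int main(void)
{
	printf("vgtif_reg(magic)     = 0x%zx\n", vgtif_reg(magic));
	printf("vgtif_reg(pdp[1].lo) = 0x%zx\n", vgtif_reg(pdp[1].lo));
	return 0;
}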
...@@ -94,6 +94,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) ...@@ -94,6 +94,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base); __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
crtc_state->update_pipe = false; crtc_state->update_pipe = false;
crtc_state->disable_lp_wm = false;
return &crtc_state->base; return &crtc_state->base;
} }
...@@ -205,8 +206,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev, ...@@ -205,8 +206,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
* but since this plane is unchanged just do the * but since this plane is unchanged just do the
* minimum required validation. * minimum required validation.
*/ */
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
intel_crtc->atomic.wait_for_flips = true;
crtc_state->base.planes_changed = true; crtc_state->base.planes_changed = true;
} }
......
...@@ -84,6 +84,7 @@ intel_plane_duplicate_state(struct drm_plane *plane) ...@@ -84,6 +84,7 @@ intel_plane_duplicate_state(struct drm_plane *plane)
state = &intel_state->base; state = &intel_state->base;
__drm_atomic_helper_plane_duplicate_state(plane, state); __drm_atomic_helper_plane_duplicate_state(plane, state);
intel_state->wait_req = NULL;
return state; return state;
} }
...@@ -100,6 +101,7 @@ void ...@@ -100,6 +101,7 @@ void
intel_plane_destroy_state(struct drm_plane *plane, intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state) struct drm_plane_state *state)
{ {
WARN_ON(state && to_intel_plane_state(state)->wait_req);
drm_atomic_helper_plane_destroy_state(plane, state); drm_atomic_helper_plane_destroy_state(plane, state);
} }
......
...@@ -161,9 +161,9 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc, ...@@ -161,9 +161,9 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc,
} }
static bool intel_eld_uptodate(struct drm_connector *connector, static bool intel_eld_uptodate(struct drm_connector *connector,
int reg_eldv, uint32_t bits_eldv, i915_reg_t reg_eldv, uint32_t bits_eldv,
int reg_elda, uint32_t bits_elda, i915_reg_t reg_elda, uint32_t bits_elda,
int reg_edid) i915_reg_t reg_edid)
{ {
struct drm_i915_private *dev_priv = connector->dev->dev_private; struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld; uint8_t *eld = connector->eld;
...@@ -364,8 +364,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder) ...@@ -364,8 +364,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
enum port port = intel_dig_port->port; enum port port = intel_dig_port->port;
enum pipe pipe = intel_crtc->pipe; enum pipe pipe = intel_crtc->pipe;
uint32_t tmp, eldv; uint32_t tmp, eldv;
int aud_config; i915_reg_t aud_config, aud_cntrl_st2;
int aud_cntrl_st2;
DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n", DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
port_name(port), pipe_name(pipe)); port_name(port), pipe_name(pipe));
...@@ -416,10 +415,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector, ...@@ -416,10 +415,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
uint32_t eldv; uint32_t eldv;
uint32_t tmp; uint32_t tmp;
int len, i; int len, i;
int hdmiw_hdmiedid; i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
int aud_config;
int aud_cntl_st;
int aud_cntrl_st2;
DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n", DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
port_name(port), pipe_name(pipe), drm_eld_size(eld)); port_name(port), pipe_name(pipe), drm_eld_size(eld));
...@@ -591,7 +587,7 @@ static void i915_audio_component_codec_wake_override(struct device *dev, ...@@ -591,7 +587,7 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
struct drm_i915_private *dev_priv = dev_to_i915(dev); struct drm_i915_private *dev_priv = dev_to_i915(dev);
u32 tmp; u32 tmp;
if (!IS_SKYLAKE(dev_priv)) if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
return; return;
/* /*
...@@ -642,10 +638,11 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, ...@@ -642,10 +638,11 @@ static int i915_audio_component_sync_audio_rate(struct device *dev,
u32 tmp; u32 tmp;
int n; int n;
/* HSW, BDW SKL need this fix */ /* HSW, BDW, SKL, KBL need this fix */
if (!IS_SKYLAKE(dev_priv) && if (!IS_SKYLAKE(dev_priv) &&
!IS_BROADWELL(dev_priv) && !IS_KABYLAKE(dev_priv) &&
!IS_HASWELL(dev_priv)) !IS_BROADWELL(dev_priv) &&
!IS_HASWELL(dev_priv))
return 0; return 0;
mutex_lock(&dev_priv->av_mutex); mutex_lock(&dev_priv->av_mutex);
......
...@@ -50,7 +50,7 @@ struct intel_crt { ...@@ -50,7 +50,7 @@ struct intel_crt {
* encoder's enable/disable callbacks */ * encoder's enable/disable callbacks */
struct intel_connector *connector; struct intel_connector *connector;
bool force_hotplug_required; bool force_hotplug_required;
u32 adpa_reg; i915_reg_t adpa_reg;
}; };
static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
...@@ -480,12 +480,8 @@ intel_crt_load_detect(struct intel_crt *crt) ...@@ -480,12 +480,8 @@ intel_crt_load_detect(struct intel_crt *crt)
uint32_t vsample; uint32_t vsample;
uint32_t vblank, vblank_start, vblank_end; uint32_t vblank, vblank_start, vblank_end;
uint32_t dsl; uint32_t dsl;
uint32_t bclrpat_reg; i915_reg_t bclrpat_reg, vtotal_reg,
uint32_t vtotal_reg; vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg;
uint32_t vblank_reg;
uint32_t vsync_reg;
uint32_t pipeconf_reg;
uint32_t pipe_dsl_reg;
uint8_t st00; uint8_t st00;
enum drm_connector_status status; enum drm_connector_status status;
...@@ -518,7 +514,7 @@ intel_crt_load_detect(struct intel_crt *crt) ...@@ -518,7 +514,7 @@ intel_crt_load_detect(struct intel_crt *crt)
/* Wait for next Vblank to substitue /* Wait for next Vblank to substitue
* border color for Color info */ * border color for Color info */
intel_wait_for_vblank(dev, pipe); intel_wait_for_vblank(dev, pipe);
st00 = I915_READ8(VGA_MSR_WRITE); st00 = I915_READ8(_VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ? status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected : connector_status_connected :
connector_status_disconnected; connector_status_disconnected;
...@@ -563,7 +559,7 @@ intel_crt_load_detect(struct intel_crt *crt) ...@@ -563,7 +559,7 @@ intel_crt_load_detect(struct intel_crt *crt)
do { do {
count++; count++;
/* Read the ST00 VGA status register */ /* Read the ST00 VGA status register */
st00 = I915_READ8(VGA_MSR_WRITE); st00 = I915_READ8(_VGA_MSR_WRITE);
if (st00 & (1 << 4)) if (st00 & (1 << 4))
detect++; detect++;
} while ((I915_READ(pipe_dsl_reg) == dsl)); } while ((I915_READ(pipe_dsl_reg) == dsl));
......
...@@ -47,21 +47,10 @@ ...@@ -47,21 +47,10 @@
MODULE_FIRMWARE(I915_CSR_SKL); MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT); MODULE_FIRMWARE(I915_CSR_BXT);
/* #define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
* SKL CSR registers for DC5 and DC6
*/
#define CSR_PROGRAM(i) (0x80000 + (i) * 4)
#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
#define CSR_HTP_ADDR_SKL 0x00500034
#define CSR_SSP_BASE 0x8F074
#define CSR_HTP_SKL 0x8F004
#define CSR_LAST_WRITE 0x8F034
#define CSR_LAST_WRITE_VALUE 0xc003b400
/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
#define CSR_MAX_FW_SIZE 0x2FFF #define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF #define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
#define CSR_MMIO_START_RANGE 0x80000
#define CSR_MMIO_END_RANGE 0x8FFFF
struct intel_css_header { struct intel_css_header {
/* 0x09 for DMC */ /* 0x09 for DMC */
...@@ -178,166 +167,134 @@ struct stepping_info { ...@@ -178,166 +167,134 @@ struct stepping_info {
}; };
static const struct stepping_info skl_stepping_info[] = { static const struct stepping_info skl_stepping_info[] = {
{'A', '0'}, {'B', '0'}, {'C', '0'}, {'A', '0'}, {'B', '0'}, {'C', '0'},
{'D', '0'}, {'E', '0'}, {'F', '0'}, {'D', '0'}, {'E', '0'}, {'F', '0'},
{'G', '0'}, {'H', '0'}, {'I', '0'} {'G', '0'}, {'H', '0'}, {'I', '0'}
}; };
static struct stepping_info bxt_stepping_info[] = { static const struct stepping_info bxt_stepping_info[] = {
{'A', '0'}, {'A', '1'}, {'A', '2'}, {'A', '0'}, {'A', '1'}, {'A', '2'},
{'B', '0'}, {'B', '1'}, {'B', '2'} {'B', '0'}, {'B', '1'}, {'B', '2'}
}; };
static char intel_get_stepping(struct drm_device *dev) static const struct stepping_info *intel_get_stepping_info(struct drm_device *dev)
{
if (IS_SKYLAKE(dev) && (dev->pdev->revision <
ARRAY_SIZE(skl_stepping_info)))
return skl_stepping_info[dev->pdev->revision].stepping;
else if (IS_BROXTON(dev) && (dev->pdev->revision <
ARRAY_SIZE(bxt_stepping_info)))
return bxt_stepping_info[dev->pdev->revision].stepping;
else
return -ENODATA;
}
static char intel_get_substepping(struct drm_device *dev)
{ {
if (IS_SKYLAKE(dev) && (dev->pdev->revision < const struct stepping_info *si;
ARRAY_SIZE(skl_stepping_info))) unsigned int size;
return skl_stepping_info[dev->pdev->revision].substepping;
else if (IS_BROXTON(dev) && (dev->pdev->revision < if (IS_SKYLAKE(dev)) {
ARRAY_SIZE(bxt_stepping_info))) size = ARRAY_SIZE(skl_stepping_info);
return bxt_stepping_info[dev->pdev->revision].substepping; si = skl_stepping_info;
else } else if (IS_BROXTON(dev)) {
return -ENODATA; size = ARRAY_SIZE(bxt_stepping_info);
} si = bxt_stepping_info;
} else {
/** return NULL;
* intel_csr_load_status_get() - to get firmware loading status. }
* @dev_priv: i915 device.
*
* This function helps to get the firmware loading status.
*
* Return: Firmware loading status.
*/
enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv)
{
enum csr_state state;
mutex_lock(&dev_priv->csr_lock); if (INTEL_REVID(dev) < size)
state = dev_priv->csr.state; return si + INTEL_REVID(dev);
mutex_unlock(&dev_priv->csr_lock);
return state; return NULL;
}
/**
* intel_csr_load_status_set() - help to set firmware loading status.
* @dev_priv: i915 device.
* @state: enumeration of firmware loading status.
*
* Set the firmware loading status.
*/
void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
enum csr_state state)
{
mutex_lock(&dev_priv->csr_lock);
dev_priv->csr.state = state;
mutex_unlock(&dev_priv->csr_lock);
} }
/** /**
* intel_csr_load_program() - write the firmware from memory to register. * intel_csr_load_program() - write the firmware from memory to register.
* @dev: drm device. * @dev_priv: i915 drm device.
* *
* CSR firmware is read from a .bin file and kept in internal memory one time. * CSR firmware is read from a .bin file and kept in internal memory one time.
* Everytime display comes back from low power state this function is called to * Everytime display comes back from low power state this function is called to
* copy the firmware from internal memory to registers. * copy the firmware from internal memory to registers.
*/ */
void intel_csr_load_program(struct drm_device *dev) void intel_csr_load_program(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
u32 *payload = dev_priv->csr.dmc_payload; u32 *payload = dev_priv->csr.dmc_payload;
uint32_t i, fw_size; uint32_t i, fw_size;
if (!IS_GEN9(dev)) { if (!IS_GEN9(dev_priv)) {
DRM_ERROR("No CSR support available for this platform\n"); DRM_ERROR("No CSR support available for this platform\n");
return; return;
} }
/* if (!dev_priv->csr.dmc_payload) {
* FIXME: Firmware gets lost on S3/S4, but not when entering system DRM_ERROR("Tried to program CSR with empty payload\n");
* standby or suspend-to-idle (which is just like forced runtime pm).
* Unfortunately the ACPI subsystem doesn't yet give us a way to
* differentiate this, hence figure it out with this hack.
*/
if (I915_READ(CSR_PROGRAM(0)))
return; return;
}
mutex_lock(&dev_priv->csr_lock);
fw_size = dev_priv->csr.dmc_fw_size; fw_size = dev_priv->csr.dmc_fw_size;
for (i = 0; i < fw_size; i++) for (i = 0; i < fw_size; i++)
I915_WRITE(CSR_PROGRAM(i), payload[i]); I915_WRITE(CSR_PROGRAM(i), payload[i]);
for (i = 0; i < dev_priv->csr.mmio_count; i++) { for (i = 0; i < dev_priv->csr.mmio_count; i++) {
I915_WRITE(dev_priv->csr.mmioaddr[i], I915_WRITE(dev_priv->csr.mmioaddr[i],
dev_priv->csr.mmiodata[i]); dev_priv->csr.mmiodata[i]);
} }
dev_priv->csr.state = FW_LOADED;
mutex_unlock(&dev_priv->csr_lock);
} }
static void finish_csr_load(const struct firmware *fw, void *context) static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
const struct firmware *fw)
{ {
struct drm_i915_private *dev_priv = context;
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
struct intel_css_header *css_header; struct intel_css_header *css_header;
struct intel_package_header *package_header; struct intel_package_header *package_header;
struct intel_dmc_header *dmc_header; struct intel_dmc_header *dmc_header;
struct intel_csr *csr = &dev_priv->csr; struct intel_csr *csr = &dev_priv->csr;
char stepping = intel_get_stepping(dev); const struct stepping_info *stepping_info = intel_get_stepping_info(dev);
char substepping = intel_get_substepping(dev); char stepping, substepping;
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
uint32_t i; uint32_t i;
uint32_t *dmc_payload; uint32_t *dmc_payload;
bool fw_loaded = false;
if (!fw) { if (!fw)
i915_firmware_load_error_print(csr->fw_path, 0); return NULL;
goto out;
}
if ((stepping == -ENODATA) || (substepping == -ENODATA)) { if (!stepping_info) {
DRM_ERROR("Unknown stepping info, firmware loading failed\n"); DRM_ERROR("Unknown stepping info, firmware loading failed\n");
goto out; return NULL;
} }
stepping = stepping_info->stepping;
substepping = stepping_info->substepping;
/* Extract CSS Header information*/ /* Extract CSS Header information*/
css_header = (struct intel_css_header *)fw->data; css_header = (struct intel_css_header *)fw->data;
if (sizeof(struct intel_css_header) != if (sizeof(struct intel_css_header) !=
(css_header->header_len * 4)) { (css_header->header_len * 4)) {
DRM_ERROR("Firmware has wrong CSS header length %u bytes\n", DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
(css_header->header_len * 4)); (css_header->header_len * 4));
goto out; return NULL;
} }
csr->version = css_header->version;
if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
" please upgrade to v%u.%u or later"
" [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED));
return NULL;
}
readcount += sizeof(struct intel_css_header); readcount += sizeof(struct intel_css_header);
/* Extract Package Header information*/ /* Extract Package Header information*/
package_header = (struct intel_package_header *) package_header = (struct intel_package_header *)
&fw->data[readcount]; &fw->data[readcount];
if (sizeof(struct intel_package_header) != if (sizeof(struct intel_package_header) !=
(package_header->header_len * 4)) { (package_header->header_len * 4)) {
DRM_ERROR("Firmware has wrong package header length %u bytes\n", DRM_ERROR("Firmware has wrong package header length %u bytes\n",
(package_header->header_len * 4)); (package_header->header_len * 4));
goto out; return NULL;
} }
readcount += sizeof(struct intel_package_header); readcount += sizeof(struct intel_package_header);
/* Search for dmc_offset to find firware binary. */ /* Search for dmc_offset to find firware binary. */
for (i = 0; i < package_header->num_entries; i++) { for (i = 0; i < package_header->num_entries; i++) {
if (package_header->fw_info[i].substepping == '*' && if (package_header->fw_info[i].substepping == '*' &&
stepping == package_header->fw_info[i].stepping) { stepping == package_header->fw_info[i].stepping) {
dmc_offset = package_header->fw_info[i].offset; dmc_offset = package_header->fw_info[i].offset;
break; break;
} else if (stepping == package_header->fw_info[i].stepping && } else if (stepping == package_header->fw_info[i].stepping &&
...@@ -345,12 +302,12 @@ static void finish_csr_load(const struct firmware *fw, void *context) ...@@ -345,12 +302,12 @@ static void finish_csr_load(const struct firmware *fw, void *context)
dmc_offset = package_header->fw_info[i].offset; dmc_offset = package_header->fw_info[i].offset;
break; break;
} else if (package_header->fw_info[i].stepping == '*' && } else if (package_header->fw_info[i].stepping == '*' &&
package_header->fw_info[i].substepping == '*') package_header->fw_info[i].substepping == '*')
dmc_offset = package_header->fw_info[i].offset; dmc_offset = package_header->fw_info[i].offset;
} }
if (dmc_offset == CSR_DEFAULT_FW_OFFSET) { if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
DRM_ERROR("Firmware not supported for %c stepping\n", stepping); DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
goto out; return NULL;
} }
readcount += dmc_offset; readcount += dmc_offset;
...@@ -358,26 +315,26 @@ static void finish_csr_load(const struct firmware *fw, void *context) ...@@ -358,26 +315,26 @@ static void finish_csr_load(const struct firmware *fw, void *context)
dmc_header = (struct intel_dmc_header *)&fw->data[readcount]; dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) { if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
DRM_ERROR("Firmware has wrong dmc header length %u bytes\n", DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
(dmc_header->header_len)); (dmc_header->header_len));
goto out; return NULL;
} }
readcount += sizeof(struct intel_dmc_header); readcount += sizeof(struct intel_dmc_header);
/* Cache the dmc header info. */ /* Cache the dmc header info. */
if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) { if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
DRM_ERROR("Firmware has wrong mmio count %u\n", DRM_ERROR("Firmware has wrong mmio count %u\n",
dmc_header->mmio_count); dmc_header->mmio_count);
goto out; return NULL;
} }
csr->mmio_count = dmc_header->mmio_count; csr->mmio_count = dmc_header->mmio_count;
for (i = 0; i < dmc_header->mmio_count; i++) { for (i = 0; i < dmc_header->mmio_count; i++) {
if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE || if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) { dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
DRM_ERROR(" Firmware has wrong mmio address 0x%x\n", DRM_ERROR(" Firmware has wrong mmio address 0x%x\n",
dmc_header->mmioaddr[i]); dmc_header->mmioaddr[i]);
goto out; return NULL;
} }
csr->mmioaddr[i] = dmc_header->mmioaddr[i]; csr->mmioaddr[i] = _MMIO(dmc_header->mmioaddr[i]);
csr->mmiodata[i] = dmc_header->mmiodata[i]; csr->mmiodata[i] = dmc_header->mmiodata[i];
} }
...@@ -385,56 +342,80 @@ static void finish_csr_load(const struct firmware *fw, void *context) ...@@ -385,56 +342,80 @@ static void finish_csr_load(const struct firmware *fw, void *context)
nbytes = dmc_header->fw_size * 4; nbytes = dmc_header->fw_size * 4;
if (nbytes > CSR_MAX_FW_SIZE) { if (nbytes > CSR_MAX_FW_SIZE) {
DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes); DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
goto out; return NULL;
} }
csr->dmc_fw_size = dmc_header->fw_size; csr->dmc_fw_size = dmc_header->fw_size;
csr->dmc_payload = kmalloc(nbytes, GFP_KERNEL); dmc_payload = kmalloc(nbytes, GFP_KERNEL);
if (!csr->dmc_payload) { if (!dmc_payload) {
DRM_ERROR("Memory allocation failed for dmc payload\n"); DRM_ERROR("Memory allocation failed for dmc payload\n");
goto out; return NULL;
} }
dmc_payload = csr->dmc_payload;
memcpy(dmc_payload, &fw->data[readcount], nbytes); memcpy(dmc_payload, &fw->data[readcount], nbytes);
return dmc_payload;
}
static void csr_load_work_fn(struct work_struct *work)
{
struct drm_i915_private *dev_priv;
struct intel_csr *csr;
const struct firmware *fw;
int ret;
dev_priv = container_of(work, typeof(*dev_priv), csr.work);
csr = &dev_priv->csr;
ret = request_firmware(&fw, dev_priv->csr.fw_path,
&dev_priv->dev->pdev->dev);
if (!fw)
goto out;
dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
if (!dev_priv->csr.dmc_payload)
goto out;
/* load csr program during system boot, as needed for DC states */ /* load csr program during system boot, as needed for DC states */
intel_csr_load_program(dev); intel_csr_load_program(dev_priv);
fw_loaded = true;
DRM_DEBUG_KMS("Finished loading %s\n", dev_priv->csr.fw_path);
out: out:
if (fw_loaded) if (dev_priv->csr.dmc_payload) {
intel_runtime_pm_put(dev_priv); intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
else
intel_csr_load_status_set(dev_priv, FW_FAILED); DRM_INFO("Finished loading %s (v%u.%u)\n",
dev_priv->csr.fw_path,
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
} else {
DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
}
release_firmware(fw); release_firmware(fw);
} }
/** /**
* intel_csr_ucode_init() - initialize the firmware loading. * intel_csr_ucode_init() - initialize the firmware loading.
* @dev: drm device. * @dev_priv: i915 drm device.
* *
* This function is called at the time of loading the display driver to read * This function is called at the time of loading the display driver to read
* firmware from a .bin file and copied into a internal memory. * firmware from a .bin file and copied into a internal memory.
*/ */
void intel_csr_ucode_init(struct drm_device *dev) void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_csr *csr = &dev_priv->csr; struct intel_csr *csr = &dev_priv->csr;
int ret;
if (!HAS_CSR(dev)) INIT_WORK(&dev_priv->csr.work, csr_load_work_fn);
if (!HAS_CSR(dev_priv))
return; return;
if (IS_SKYLAKE(dev)) if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL; csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv)) else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT; csr->fw_path = I915_CSR_BXT;
else { else {
DRM_ERROR("Unexpected: no known CSR firmware for platform\n"); DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
intel_csr_load_status_set(dev_priv, FW_FAILED);
return; return;
} }
...@@ -444,43 +425,24 @@ void intel_csr_ucode_init(struct drm_device *dev) ...@@ -444,43 +425,24 @@ void intel_csr_ucode_init(struct drm_device *dev)
* Obtain a runtime pm reference, until CSR is loaded, * Obtain a runtime pm reference, until CSR is loaded,
* to avoid entering runtime-suspend. * to avoid entering runtime-suspend.
*/ */
intel_runtime_pm_get(dev_priv); intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
/* CSR supported for platform, load firmware */ schedule_work(&dev_priv->csr.work);
ret = request_firmware_nowait(THIS_MODULE, true, csr->fw_path,
&dev_priv->dev->pdev->dev,
GFP_KERNEL, dev_priv,
finish_csr_load);
if (ret) {
i915_firmware_load_error_print(csr->fw_path, ret);
intel_csr_load_status_set(dev_priv, FW_FAILED);
}
} }
/** /**
* intel_csr_ucode_fini() - unload the CSR firmware. * intel_csr_ucode_fini() - unload the CSR firmware.
* @dev: drm device. * @dev_priv: i915 drm device.
* *
* Firmmware unloading includes freeing the internal momory and reset the * Firmmware unloading includes freeing the internal momory and reset the
* firmware loading status. * firmware loading status.
*/ */
void intel_csr_ucode_fini(struct drm_device *dev) void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; if (!HAS_CSR(dev_priv))
if (!HAS_CSR(dev))
return; return;
intel_csr_load_status_set(dev_priv, FW_FAILED); flush_work(&dev_priv->csr.work);
kfree(dev_priv->csr.dmc_payload);
}
void assert_csr_loaded(struct drm_i915_private *dev_priv) kfree(dev_priv->csr.dmc_payload);
{
WARN_ONCE(intel_csr_load_status_get(dev_priv) != FW_LOADED,
"CSR is not loaded.\n");
WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
"CSR program storage start is NULL\n");
WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
} }
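The intel_csr.c rework above replaces the request_firmware_nowait() callback with an ordinary work item: intel_csr_ucode_init() takes a display power reference and schedules csr.work, csr_load_work_fn() requests and parses the firmware blob, and intel_csr_ucode_fini() flushes the work before freeing the payload. A minimal standalone sketch of that schedule-then-flush ordering, using a pthread in place of the kernel workqueue (all names here are stand-ins, not driver code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the driver state: the parsed firmware payload. */
struct fake_csr {
	pthread_t worker;
	char *dmc_payload;
};

/* Plays the role of csr_load_work_fn(): fetch and parse the firmware. */
static void *load_work_fn(void *arg)
{
	struct fake_csr *csr = arg;

	/* request_firmware() + parse_csr_fw() would happen here. */
	csr->dmc_payload = strdup("dmc payload bytes");
	printf("worker: firmware parsed\n");
	return NULL;
}

/* Plays the role of intel_csr_ucode_init(): kick off the async load. */
static void ucode_init(struct fake_csr *csr)
{
	pthread_create(&csr->worker, NULL, load_work_fn, csr);
}

/* Plays the role of intel_csr_ucode_fini(): wait for the loader to finish
 * (flush_work() in the kernel) before freeing the payload. */
static void ucode_fini(struct fake_csr *csr)
{
	pthread_join(csr->worker, NULL);
	free(csr->dmc_payload);
}

int main(void)
{
	struct fake_csr csr = { 0 };

	ucode_init(&csr);
	ucode_fini(&csr);
	return 0;
}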
...@@ -133,12 +133,12 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = { ...@@ -133,12 +133,12 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
{ 0x00002016, 0x000000A0, 0x0 }, { 0x00002016, 0x000000A0, 0x0 },
{ 0x00005012, 0x0000009B, 0x0 }, { 0x00005012, 0x0000009B, 0x0 },
{ 0x00007011, 0x00000088, 0x0 }, { 0x00007011, 0x00000088, 0x0 },
{ 0x00009010, 0x000000C7, 0x0 }, { 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x00002016, 0x0000009B, 0x0 }, { 0x00002016, 0x0000009B, 0x0 },
{ 0x00005012, 0x00000088, 0x0 }, { 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x000000C7, 0x0 }, { 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x00002016, 0x000000DF, 0x0 }, { 0x00002016, 0x000000DF, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 }, { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
}; };
/* Skylake U */ /* Skylake U */
...@@ -146,12 +146,12 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { ...@@ -146,12 +146,12 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
{ 0x0000201B, 0x000000A2, 0x0 }, { 0x0000201B, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 }, { 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x00000087, 0x0 }, { 0x00007011, 0x00000087, 0x0 },
{ 0x80009010, 0x000000C7, 0x1 }, /* Uses I_boost level 0x1 */ { 0x80009010, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x0000201B, 0x0000009D, 0x0 }, { 0x0000201B, 0x0000009D, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 }, { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x00007011, 0x000000C7, 0x0 }, { 0x80007011, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
{ 0x00002016, 0x00000088, 0x0 }, { 0x00002016, 0x00000088, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 }, { 0x80005012, 0x000000C0, 0x1 }, /* Uses I_boost level 0x1 */
}; };
/* Skylake Y */ /* Skylake Y */
...@@ -159,12 +159,12 @@ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { ...@@ -159,12 +159,12 @@ static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
{ 0x00000018, 0x000000A2, 0x0 }, { 0x00000018, 0x000000A2, 0x0 },
{ 0x00005012, 0x00000088, 0x0 }, { 0x00005012, 0x00000088, 0x0 },
{ 0x00007011, 0x00000087, 0x0 }, { 0x00007011, 0x00000087, 0x0 },
{ 0x80009010, 0x000000C7, 0x3 }, /* Uses I_boost level 0x3 */ { 0x80009010, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x00000018, 0x0000009D, 0x0 }, { 0x00000018, 0x0000009D, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 }, { 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x00007011, 0x000000C7, 0x0 }, { 0x80007011, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
{ 0x00000018, 0x00000088, 0x0 }, { 0x00000018, 0x00000088, 0x0 },
{ 0x00005012, 0x000000C7, 0x0 }, { 0x80005012, 0x000000C0, 0x3 }, /* Uses I_boost level 0x3 */
}; };
/* /*
...@@ -345,7 +345,7 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) ...@@ -345,7 +345,7 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
static bool static bool
intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port) intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
{ {
return intel_dig_port->hdmi.hdmi_reg; return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg);
} }
static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev, static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
...@@ -448,7 +448,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, ...@@ -448,7 +448,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
bxt_ddi_vswing_sequence(dev, hdmi_level, port, bxt_ddi_vswing_sequence(dev, hdmi_level, port,
INTEL_OUTPUT_HDMI); INTEL_OUTPUT_HDMI);
return; return;
} else if (IS_SKYLAKE(dev)) { } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
ddi_translations_fdi = NULL; ddi_translations_fdi = NULL;
ddi_translations_dp = ddi_translations_dp =
skl_get_buf_trans_dp(dev, &n_dp_entries); skl_get_buf_trans_dp(dev, &n_dp_entries);
...@@ -576,7 +576,7 @@ void intel_prepare_ddi(struct drm_device *dev) ...@@ -576,7 +576,7 @@ void intel_prepare_ddi(struct drm_device *dev)
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port) enum port port)
{ {
uint32_t reg = DDI_BUF_CTL(port); i915_reg_t reg = DDI_BUF_CTL(port);
int i; int i;
for (i = 0; i < 16; i++) { for (i = 0; i < 16; i++) {
...@@ -931,7 +931,8 @@ static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget, ...@@ -931,7 +931,8 @@ static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
/* Otherwise a < c && b >= d, do nothing */ /* Otherwise a < c && b >= d, do nothing */
} }
static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg) static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{ {
int refclk = LC_FREQ; int refclk = LC_FREQ;
int n, p, r; int n, p, r;
...@@ -967,7 +968,7 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg) ...@@ -967,7 +968,7 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, int reg)
static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
uint32_t dpll) uint32_t dpll)
{ {
uint32_t cfgcr1_reg, cfgcr2_reg; i915_reg_t cfgcr1_reg, cfgcr2_reg;
uint32_t cfgcr1_val, cfgcr2_val; uint32_t cfgcr1_val, cfgcr2_val;
uint32_t p0, p1, p2, dco_freq; uint32_t p0, p1, p2, dco_freq;
...@@ -1112,10 +1113,10 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder, ...@@ -1112,10 +1113,10 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
link_clock = 270000; link_clock = 270000;
break; break;
case PORT_CLK_SEL_WRPLL1: case PORT_CLK_SEL_WRPLL1:
link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL1); link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0));
break; break;
case PORT_CLK_SEL_WRPLL2: case PORT_CLK_SEL_WRPLL2:
link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL2); link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
break; break;
case PORT_CLK_SEL_SPLL: case PORT_CLK_SEL_SPLL:
pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK; pll = I915_READ(SPLL_CTL) & SPLL_PLL_FREQ_MASK;
...@@ -1184,7 +1185,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder, ...@@ -1184,7 +1185,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
if (INTEL_INFO(dev)->gen <= 8) if (INTEL_INFO(dev)->gen <= 8)
hsw_ddi_clock_get(encoder, pipe_config); hsw_ddi_clock_get(encoder, pipe_config);
else if (IS_SKYLAKE(dev)) else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_ddi_clock_get(encoder, pipe_config); skl_ddi_clock_get(encoder, pipe_config);
else if (IS_BROXTON(dev)) else if (IS_BROXTON(dev))
bxt_ddi_clock_get(encoder, pipe_config); bxt_ddi_clock_get(encoder, pipe_config);
...@@ -1780,7 +1781,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc, ...@@ -1780,7 +1781,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder = struct intel_encoder *intel_encoder =
intel_ddi_get_crtc_new_encoder(crtc_state); intel_ddi_get_crtc_new_encoder(crtc_state);
if (IS_SKYLAKE(dev)) if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
return skl_ddi_pll_select(intel_crtc, crtc_state, return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder); intel_encoder);
else if (IS_BROXTON(dev)) else if (IS_BROXTON(dev))
...@@ -1942,7 +1943,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) ...@@ -1942,7 +1943,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder) enum transcoder cpu_transcoder)
{ {
uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg); uint32_t val = I915_READ(reg);
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
...@@ -2097,21 +2098,21 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level, ...@@ -2097,21 +2098,21 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
iboost = dp_iboost; iboost = dp_iboost;
} else { } else {
ddi_translations = skl_get_buf_trans_dp(dev, &n_entries); ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
iboost = ddi_translations[port].i_boost; iboost = ddi_translations[level].i_boost;
} }
} else if (type == INTEL_OUTPUT_EDP) { } else if (type == INTEL_OUTPUT_EDP) {
if (dp_iboost) { if (dp_iboost) {
iboost = dp_iboost; iboost = dp_iboost;
} else { } else {
ddi_translations = skl_get_buf_trans_edp(dev, &n_entries); ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
iboost = ddi_translations[port].i_boost; iboost = ddi_translations[level].i_boost;
} }
} else if (type == INTEL_OUTPUT_HDMI) { } else if (type == INTEL_OUTPUT_HDMI) {
if (hdmi_iboost) { if (hdmi_iboost) {
iboost = hdmi_iboost; iboost = hdmi_iboost;
} else { } else {
ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries); ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
iboost = ddi_translations[port].i_boost; iboost = ddi_translations[level].i_boost;
} }
} else { } else {
return; return;
...@@ -2263,7 +2264,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) ...@@ -2263,7 +2264,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
level = translate_signal_level(signal_levels); level = translate_signal_level(signal_levels);
if (IS_SKYLAKE(dev)) if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_ddi_set_iboost(dev, level, port, encoder->type); skl_ddi_set_iboost(dev, level, port, encoder->type);
else if (IS_BROXTON(dev)) else if (IS_BROXTON(dev))
bxt_ddi_vswing_sequence(dev, level, port, encoder->type); bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
...@@ -2271,30 +2272,21 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) ...@@ -2271,30 +2272,21 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
return DDI_BUF_TRANS_SELECT(level); return DDI_BUF_TRANS_SELECT(level);
} }
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) void intel_ddi_clk_select(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{ {
struct drm_encoder *encoder = &intel_encoder->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_device *dev = encoder->dev; enum port port = intel_ddi_get_encoder_port(encoder);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
int hdmi_level;
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_panel_on(intel_dp);
}
if (IS_SKYLAKE(dev)) { if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
uint32_t dpll = crtc->config->ddi_pll_sel; uint32_t dpll = pipe_config->ddi_pll_sel;
uint32_t val; uint32_t val;
/* /*
* DPLL0 is used for eDP and is the only "private" DPLL (as * DPLL0 is used for eDP and is the only "private" DPLL (as
* opposed to shared) on SKL * opposed to shared) on SKL
*/ */
if (type == INTEL_OUTPUT_EDP) { if (encoder->type == INTEL_OUTPUT_EDP) {
WARN_ON(dpll != SKL_DPLL0); WARN_ON(dpll != SKL_DPLL0);
val = I915_READ(DPLL_CTRL1); val = I915_READ(DPLL_CTRL1);
...@@ -2302,7 +2294,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) ...@@ -2302,7 +2294,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
DPLL_CTRL1_SSC(dpll) | DPLL_CTRL1_SSC(dpll) |
DPLL_CTRL1_LINK_RATE_MASK(dpll)); DPLL_CTRL1_LINK_RATE_MASK(dpll));
val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6); val |= pipe_config->dpll_hw_state.ctrl1 << (dpll * 6);
I915_WRITE(DPLL_CTRL1, val); I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1); POSTING_READ(DPLL_CTRL1);
...@@ -2318,10 +2310,28 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) ...@@ -2318,10 +2310,28 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
I915_WRITE(DPLL_CTRL2, val); I915_WRITE(DPLL_CTRL2, val);
} else if (INTEL_INFO(dev)->gen < 9) { } else if (INTEL_INFO(dev_priv)->gen < 9) {
WARN_ON(crtc->config->ddi_pll_sel == PORT_CLK_SEL_NONE); WARN_ON(pipe_config->ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), crtc->config->ddi_pll_sel); I915_WRITE(PORT_CLK_SEL(port), pipe_config->ddi_pll_sel);
} }
}
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
int hdmi_level;
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_panel_on(intel_dp);
}
intel_ddi_clk_select(intel_encoder, crtc->config);
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
...@@ -2381,7 +2391,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) ...@@ -2381,7 +2391,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
intel_edp_panel_off(intel_dp); intel_edp_panel_off(intel_dp);
} }
if (IS_SKYLAKE(dev)) if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) | I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port))); DPLL_CTRL2_DDI_CLK_OFF(port)));
else if (INTEL_INFO(dev)->gen < 9) else if (INTEL_INFO(dev)->gen < 9)
...@@ -2553,7 +2563,7 @@ static const char * const skl_ddi_pll_names[] = { ...@@ -2553,7 +2563,7 @@ static const char * const skl_ddi_pll_names[] = {
}; };
struct skl_dpll_regs { struct skl_dpll_regs {
u32 ctl, cfgcr1, cfgcr2; i915_reg_t ctl, cfgcr1, cfgcr2;
}; };
/* this array is indexed by the *shared* pll id */ /* this array is indexed by the *shared* pll id */
...@@ -2566,13 +2576,13 @@ static const struct skl_dpll_regs skl_dpll_regs[3] = { ...@@ -2566,13 +2576,13 @@ static const struct skl_dpll_regs skl_dpll_regs[3] = {
}, },
{ {
/* DPLL 2 */ /* DPLL 2 */
.ctl = WRPLL_CTL1, .ctl = WRPLL_CTL(0),
.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2), .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2), .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
}, },
{ {
/* DPLL 3 */ /* DPLL 3 */
.ctl = WRPLL_CTL2, .ctl = WRPLL_CTL(1),
.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3), .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3), .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
}, },
...@@ -2992,22 +3002,22 @@ void intel_ddi_pll_init(struct drm_device *dev) ...@@ -2992,22 +3002,22 @@ void intel_ddi_pll_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL); uint32_t val = I915_READ(LCPLL_CTL);
if (IS_SKYLAKE(dev)) if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
skl_shared_dplls_init(dev_priv); skl_shared_dplls_init(dev_priv);
else if (IS_BROXTON(dev)) else if (IS_BROXTON(dev))
bxt_shared_dplls_init(dev_priv); bxt_shared_dplls_init(dev_priv);
else else
hsw_shared_dplls_init(dev_priv); hsw_shared_dplls_init(dev_priv);
if (IS_SKYLAKE(dev)) { if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
int cdclk_freq; int cdclk_freq;
cdclk_freq = dev_priv->display.get_display_clock_speed(dev); cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
dev_priv->skl_boot_cdclk = cdclk_freq; dev_priv->skl_boot_cdclk = cdclk_freq;
if (skl_sanitize_cdclk(dev_priv))
DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
DRM_ERROR("LCPLL1 is disabled\n"); DRM_ERROR("LCPLL1 is disabled\n");
else
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
} else if (IS_BROXTON(dev)) { } else if (IS_BROXTON(dev)) {
broxton_init_cdclk(dev); broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev); broxton_ddi_phy_init(dev);
...@@ -3026,11 +3036,11 @@ void intel_ddi_pll_init(struct drm_device *dev) ...@@ -3026,11 +3036,11 @@ void intel_ddi_pll_init(struct drm_device *dev)
} }
} }
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
{ {
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv =
struct drm_i915_private *dev_priv = encoder->dev->dev_private; to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->port; enum port port = intel_dig_port->port;
uint32_t val; uint32_t val;
bool wait = false; bool wait = false;
...@@ -3289,6 +3299,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port) ...@@ -3289,6 +3299,20 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
(DDI_BUF_PORT_REVERSAL | (DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES); DDI_A_4_LANES);
/*
* Bspec says that DDI_A_4_LANES is the only supported configuration
* for Broxton. Yet some BIOS fail to set this bit on port A if eDP
* wasn't lit up at boot. Force this bit on in our internal
* configuration so that we use the proper lane count for our
* calculations.
*/
if (IS_BROXTON(dev) && port == PORT_A) {
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
}
}
intel_encoder->type = INTEL_OUTPUT_UNKNOWN; intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0; intel_encoder->cloneable = 0;
...@@ -3302,8 +3326,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) ...@@ -3302,8 +3326,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* On BXT A0/A1, sw needs to activate DDIA HPD logic and * On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection. * interrupts to check the external panel connection.
*/ */
if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0) if (IS_BXT_REVID(dev, 0, BXT_REVID_A1) && port == PORT_B)
&& port == PORT_B)
dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port; dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
else else
dev_priv->hotplug.irq_port[port] = intel_dig_port; dev_priv->hotplug.irq_port[port] = intel_dig_port;
......
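The hotplug hunk above switches an open-coded `INTEL_REVID(dev) < BXT_REVID_B0` test to an IS_BXT_REVID(dev, from, until) range helper. A standalone sketch of such an inclusive range check; the stepping values below are illustrative rather than copied from the driver headers.

#include <stdio.h>

/* Illustrative stepping values. */
#define BXT_REVID_A0 0
#define BXT_REVID_A1 1
#define BXT_REVID_B0 3

/* Inclusive range check: is revid within [from, until]? */
static int revid_in_range(int revid, int from, int until)
{
	return revid >= from && revid <= until;
}

int main(void)
{
	int revid;

	for (revid = 0; revid <= 4; revid++)
		printf("revid %d is A0..A1: %d\n", revid,
		       revid_in_range(revid, BXT_REVID_A0, BXT_REVID_A1));
	return 0;
}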
...@@ -173,20 +173,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) ...@@ -173,20 +173,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
intel_mst->port = found->port; intel_mst->port = found->port;
if (intel_dp->active_mst_links == 0) { if (intel_dp->active_mst_links == 0) {
enum port port = intel_ddi_get_encoder_port(encoder); intel_ddi_clk_select(encoder, intel_crtc->config);
intel_dp_set_link_params(intel_dp, intel_crtc->config); intel_dp_set_link_params(intel_dp, intel_crtc->config);
/* FIXME: add support for SKL */
if (INTEL_INFO(dev)->gen < 9)
I915_WRITE(PORT_CLK_SEL(port),
intel_crtc->config->ddi_pll_sel);
intel_ddi_init_dp_buf_reg(&intel_dig_port->base); intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp); intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp); intel_dp_stop_link_train(intel_dp);
} }
...@@ -414,7 +408,10 @@ static void intel_connector_add_to_fbdev(struct intel_connector *connector) ...@@ -414,7 +408,10 @@ static void intel_connector_add_to_fbdev(struct intel_connector *connector)
{ {
#ifdef CONFIG_DRM_FBDEV_EMULATION #ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
if (dev_priv->fbdev)
drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
&connector->base);
#endif #endif
} }
...@@ -422,7 +419,10 @@ static void intel_connector_remove_from_fbdev(struct intel_connector *connector) ...@@ -422,7 +419,10 @@ static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
{ {
#ifdef CONFIG_DRM_FBDEV_EMULATION #ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
if (dev_priv->fbdev)
drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
&connector->base);
#endif #endif
} }
......
...@@ -60,7 +60,8 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port) ...@@ -60,7 +60,8 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
DRM_ERROR("DPI FIFOs are not empty\n"); DRM_ERROR("DPI FIFOs are not empty\n");
} }
static void write_data(struct drm_i915_private *dev_priv, u32 reg, static void write_data(struct drm_i915_private *dev_priv,
i915_reg_t reg,
const u8 *data, u32 len) const u8 *data, u32 len)
{ {
u32 i, j; u32 i, j;
...@@ -75,7 +76,8 @@ static void write_data(struct drm_i915_private *dev_priv, u32 reg, ...@@ -75,7 +76,8 @@ static void write_data(struct drm_i915_private *dev_priv, u32 reg,
} }
} }
static void read_data(struct drm_i915_private *dev_priv, u32 reg, static void read_data(struct drm_i915_private *dev_priv,
i915_reg_t reg,
u8 *data, u32 len) u8 *data, u32 len)
{ {
u32 i, j; u32 i, j;
...@@ -98,7 +100,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, ...@@ -98,7 +100,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
struct mipi_dsi_packet packet; struct mipi_dsi_packet packet;
ssize_t ret; ssize_t ret;
const u8 *header, *data; const u8 *header, *data;
u32 data_reg, data_mask, ctrl_reg, ctrl_mask; i915_reg_t data_reg, ctrl_reg;
u32 data_mask, ctrl_mask;
ret = mipi_dsi_create_packet(&packet, msg); ret = mipi_dsi_create_packet(&packet, msg);
if (ret < 0) if (ret < 0)
...@@ -377,10 +380,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder) ...@@ -377,10 +380,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port; enum port port;
u32 temp;
u32 port_ctrl;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
u32 temp;
temp = I915_READ(VLV_CHICKEN_3); temp = I915_READ(VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK | temp &= ~PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap << intel_dsi->pixel_overlap <<
...@@ -389,8 +392,9 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder) ...@@ -389,8 +392,9 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
} }
for_each_dsi_port(port, intel_dsi->ports) { for_each_dsi_port(port, intel_dsi->ports) {
port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) : i915_reg_t port_ctrl = IS_BROXTON(dev) ?
MIPI_PORT_CTRL(port); BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
temp = I915_READ(port_ctrl); temp = I915_READ(port_ctrl);
...@@ -416,13 +420,13 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) ...@@ -416,13 +420,13 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port; enum port port;
u32 temp;
u32 port_ctrl;
for_each_dsi_port(port, intel_dsi->ports) { for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t port_ctrl = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
/* de-assert ip_tg_enable signal */ /* de-assert ip_tg_enable signal */
port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
MIPI_PORT_CTRL(port);
temp = I915_READ(port_ctrl); temp = I915_READ(port_ctrl);
I915_WRITE(port_ctrl, temp & ~DPI_ENABLE); I915_WRITE(port_ctrl, temp & ~DPI_ENABLE);
POSTING_READ(port_ctrl); POSTING_READ(port_ctrl);
...@@ -580,11 +584,13 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) ...@@ -580,11 +584,13 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port; enum port port;
u32 val;
u32 port_ctrl = 0;
DRM_DEBUG_KMS("\n"); DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) { for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY | I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
ULPS_STATE_ENTER); ULPS_STATE_ENTER);
...@@ -598,12 +604,6 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) ...@@ -598,12 +604,6 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
ULPS_STATE_ENTER); ULPS_STATE_ENTER);
usleep_range(2000, 2500); usleep_range(2000, 2500);
if (IS_BROXTON(dev))
port_ctrl = BXT_MIPI_PORT_CTRL(port);
else if (IS_VALLEYVIEW(dev))
/* Common bit for both MIPI Port A & MIPI Port C */
port_ctrl = MIPI_PORT_CTRL(PORT_A);
/* Wait till Clock lanes are in LP-00 state for MIPI Port A /* Wait till Clock lanes are in LP-00 state for MIPI Port A
* only. MIPI Port C has no similar bit for checking * only. MIPI Port C has no similar bit for checking
*/ */
...@@ -656,7 +656,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, ...@@ -656,7 +656,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct drm_device *dev = encoder->base.dev; struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain; enum intel_display_power_domain power_domain;
u32 dpi_enabled, func, ctrl_reg;
enum port port; enum port port;
DRM_DEBUG_KMS("\n"); DRM_DEBUG_KMS("\n");
...@@ -667,9 +666,11 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, ...@@ -667,9 +666,11 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
/* XXX: this only works for one DSI output */ /* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) { for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 dpi_enabled, func;
func = I915_READ(MIPI_DSI_FUNC_PRG(port)); func = I915_READ(MIPI_DSI_FUNC_PRG(port));
ctrl_reg = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
MIPI_PORT_CTRL(port);
dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE; dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
/* Due to some hardware limitations on BYT, MIPI Port C DPI /* Due to some hardware limitations on BYT, MIPI Port C DPI
......
...@@ -76,11 +76,17 @@ struct intel_guc_fw { ...@@ -76,11 +76,17 @@ struct intel_guc_fw {
uint16_t guc_fw_minor_wanted; uint16_t guc_fw_minor_wanted;
uint16_t guc_fw_major_found; uint16_t guc_fw_major_found;
uint16_t guc_fw_minor_found; uint16_t guc_fw_minor_found;
uint32_t header_size;
uint32_t header_offset;
uint32_t rsa_size;
uint32_t rsa_offset;
uint32_t ucode_size;
uint32_t ucode_offset;
}; };
struct intel_guc { struct intel_guc {
struct intel_guc_fw guc_fw; struct intel_guc_fw guc_fw;
uint32_t log_flags; uint32_t log_flags;
struct drm_i915_gem_object *log_obj; struct drm_i915_gem_object *log_obj;
......
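The new header/ucode/RSA size and offset fields added to intel_guc_fw above describe how the GuC firmware blob is partitioned. A hypothetical standalone sketch of a bounds check over such a layout (the struct and function names mirror the fields but are invented for illustration, and the validation logic is not the driver's):

#include <stdint.h>
#include <stdio.h>

struct guc_fw_layout {
	uint32_t header_offset, header_size;
	uint32_t ucode_offset, ucode_size;
	uint32_t rsa_offset, rsa_size;
};

/* Every region must lie entirely inside the fetched blob. */
static int layout_fits(const struct guc_fw_layout *l, uint32_t blob_size)
{
	return (uint64_t)l->header_offset + l->header_size <= blob_size &&
	       (uint64_t)l->ucode_offset + l->ucode_size <= blob_size &&
	       (uint64_t)l->rsa_offset + l->rsa_size <= blob_size;
}

int main(void)
{
	/* Made-up numbers purely for illustration. */
	struct guc_fw_layout l = {
		.header_offset = 0,              .header_size = 128,
		.ucode_offset  = 128,            .ucode_size  = 64 * 1024,
		.rsa_offset    = 128 + 64 * 1024, .rsa_size   = 256,
	};

	printf("layout fits in 128 KiB blob: %d\n",
	       layout_fits(&l, 128 * 1024));
	return 0;
}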
...@@ -326,3 +326,4 @@ bool current_is_async(void) ...@@ -326,3 +326,4 @@ bool current_is_async(void)
return worker && worker->current_func == async_run_entry_fn; return worker && worker->current_func == async_run_entry_fn;
} }
EXPORT_SYMBOL_GPL(current_is_async);
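current_is_async() above reports whether the caller is running from an async_run_entry_fn() worker, and exporting it lets modules make that distinction too. A hedged sketch of how a consumer might branch on it; the helper is stubbed so the example is self-contained, and the function name setup_fbdev() is purely illustrative.

#include <stdbool.h>
#include <stdio.h>

/* Stub standing in for the exported kernel helper; the real one checks
 * whether the current task is an async_run_entry_fn() worker. */
static bool current_is_async(void)
{
	return false;
}

/* Illustrative consumer: pick an inline vs. deferred path depending on
 * whether we are already running in the async domain. */
static void setup_fbdev(void)
{
	if (current_is_async())
		printf("already async: do the slow probe inline\n");
	else
		printf("sync caller: defer the slow probe to async context\n");
}

int main(void)
{
	setup_fbdev();
	return 0;
}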