Commit 40d201af authored by Dave Airlie

Merge tag 'drm-intel-next-2014-09-05' of git://anongit.freedesktop.org/drm-intel into drm-next

- final bits (again) for the rotation support (Sonika Jindal)
- support bl_power in the intel backlight (Jani)
- vdd handling improvements from Ville
- i830M fixes from Ville
- piles of prep work all over to make skl enabling just plug in (Damien, Sonika)
- rename DP training defines to reflect latest edp standards, this touches all
  drm drivers supporting DP (Sonika Jindal)
- cache edids during single detect cycle to avoid re-reading it for e.g. audio,
  from Chris
- move w/a for registers which are stored in the hw context to the context init
  code (Arun&Damien)
- edp panel power sequencer fixes, helps chv a lot (Ville)
- piles of other chv fixes all over
- much more paranoid pageflip handling with stall detection and better recovery
  from Chris
- small things all over, as usual

* tag 'drm-intel-next-2014-09-05' of git://anongit.freedesktop.org/drm-intel: (114 commits)
  drm/i915: Update DRIVER_DATE to 20140905
  drm/i915: Decouple the stuck pageflip on modeset
  drm/i915: Check for a stalled page flip after each vblank
  drm/i915: Introduce a for_each_plane() macro
  drm/i915: Rewrite ABS_DIFF() in a safer manner
  drm/i915: Add comments explaining the vdd on/off functions
  drm/i915: Move DP port disable to post_disable for pch platforms
  drm/i915: Enable DP port earlier
  drm/i915: Turn on panel power before doing aux transfers
  drm/i915: Be more careful when picking the initial power sequencer pipe
  drm/i915: Reset power sequencer pipe tracking when disp2d is off
  drm/i915: Track which port is using which pipe's power sequencer
  drm/i915: Fix edp vdd locking
  drm/i915: Reset the HEAD pointer for the ring after writing START
  drm/i915: Fix unsafe vma iteration in i915_drop_caches
  drm/i915: init sprites with univeral plane init function
  drm/i915: Check of !HAS_PCH_SPLIT() in PCH transcoder funcs
  drm/i915: Use HAS_GMCH_DISPLAY un underrun reporting code
  drm/i915: Use IS_BROADWELL() instead of IS_GEN8() in forcewake code
  drm/i915: Don't call gen8_fbc_sw_flush() on chv
  ...
@@ -3091,7 +3091,7 @@ F: include/drm/drm_panel.h
 F: Documentation/devicetree/bindings/panel/

 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
-M: Daniel Vetter <daniel.vetter@ffwll.ch>
+M: Daniel Vetter <daniel.vetter@intel.com>
 M: Jani Nikula <jani.nikula@linux.intel.com>
 L: intel-gfx@lists.freedesktop.org
 L: dri-devel@lists.freedesktop.org
...
@@ -329,8 +329,8 @@ static int exynos_dp_link_start(struct exynos_dp_device *dp)
         return retval;

     for (lane = 0; lane < lane_count; lane++)
-        buf[lane] = DP_TRAIN_PRE_EMPHASIS_0 |
-            DP_TRAIN_VOLTAGE_SWING_400;
+        buf[lane] = DP_TRAIN_PRE_EMPH_LEVEL_0 |
+            DP_TRAIN_VOLTAGE_SWING_LEVEL_0;

     retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
                     lane_count, buf);
...
@@ -1089,7 +1089,7 @@ static char *link_train_names[] = {
 };
 #endif

-#define CDV_DP_VOLTAGE_MAX      DP_TRAIN_VOLTAGE_SWING_1200
+#define CDV_DP_VOLTAGE_MAX      DP_TRAIN_VOLTAGE_SWING_LEVEL_3
 /*
 static uint8_t
 cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
@@ -1276,7 +1276,7 @@ cdv_intel_dp_set_vswing_premph(struct gma_encoder *encoder, uint8_t signal_level
     cdv_sb_write(dev, ddi_reg->VSwing2, dp_vswing_premph_table[index]);

     /* ;gfx_dpio_set_reg(0x814c, 0x40802040) */
-    if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_1200)
+    if ((vswing + premph) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)
         cdv_sb_write(dev, ddi_reg->VSwing3, 0x70802040);
     else
         cdv_sb_write(dev, ddi_reg->VSwing3, 0x40802040);
...
@@ -116,30 +116,30 @@ parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb)
     switch (edp_link_params->preemphasis) {
     case 0:
-        dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+        dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
         break;
     case 1:
-        dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+        dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
         break;
     case 2:
-        dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+        dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
         break;
     case 3:
-        dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+        dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
         break;
     }
     switch (edp_link_params->vswing) {
     case 0:
-        dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+        dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
         break;
     case 1:
-        dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+        dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
         break;
     case 2:
-        dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+        dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
         break;
     case 3:
-        dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
+        dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
         break;
     }
     DRM_DEBUG_KMS("VBT reports EDP: VSwing %d, Preemph %d\n",
...
(This diff has been collapsed.)
@@ -136,7 +136,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
            obj->last_read_seqno,
            obj->last_write_seqno,
            obj->last_fenced_seqno,
-           i915_cache_level_str(obj->cache_level),
+           i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
            obj->dirty ? " dirty" : "",
            obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
     if (obj->base.name)
@@ -515,6 +515,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
     struct drm_info_node *node = m->private;
     struct drm_device *dev = node->minor->dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
     unsigned long flags;
     struct intel_crtc *crtc;
     int ret;
@@ -534,6 +535,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
             seq_printf(m, "No flip due on pipe %c (plane %c)\n",
                    pipe, plane);
         } else {
+            u32 addr;
+
             if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
                 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
                        pipe, plane);
@@ -541,23 +544,35 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
                        pipe, plane);
             }
+            if (work->flip_queued_ring) {
+                seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
+                       work->flip_queued_ring->name,
+                       work->flip_queued_seqno,
+                       dev_priv->next_seqno,
+                       work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
+                       i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
+                                 work->flip_queued_seqno));
+            } else
+                seq_printf(m, "Flip not associated with any ring\n");
+            seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
+                   work->flip_queued_vblank,
+                   work->flip_ready_vblank,
+                   drm_vblank_count(dev, crtc->pipe));
             if (work->enable_stall_check)
                 seq_puts(m, "Stall check enabled, ");
             else
                 seq_puts(m, "Stall check waiting for page flip ioctl, ");
             seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

-            if (work->old_fb_obj) {
-                struct drm_i915_gem_object *obj = work->old_fb_obj;
-                if (obj)
-                    seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
-                           i915_gem_obj_ggtt_offset(obj));
-            }
+            if (INTEL_INFO(dev)->gen >= 4)
+                addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
+            else
+                addr = I915_READ(DSPADDR(crtc->plane));
+            seq_printf(m, "Current scanout address 0x%08x\n", addr);
+
             if (work->pending_flip_obj) {
-                struct drm_i915_gem_object *obj = work->pending_flip_obj;
-                if (obj)
-                    seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
-                           i915_gem_obj_ggtt_offset(obj));
+                seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
+                seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
             }
         }
     }
     spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -650,7 +665,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
     intel_runtime_pm_get(dev_priv);

     if (IS_CHERRYVIEW(dev)) {
-        int i;
         seq_printf(m, "Master Interrupt Control:\t%08x\n",
                I915_READ(GEN8_MASTER_IRQ));
@@ -662,7 +676,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                I915_READ(VLV_IIR_RW));
         seq_printf(m, "Display IMR:\t%08x\n",
                I915_READ(VLV_IMR));
-        for_each_pipe(pipe)
+        for_each_pipe(dev_priv, pipe)
             seq_printf(m, "Pipe %c stat:\t%08x\n",
                    pipe_name(pipe),
                    I915_READ(PIPESTAT(pipe)));
@@ -702,7 +716,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                i, I915_READ(GEN8_GT_IER(i)));
     }

-    for_each_pipe(pipe) {
+    for_each_pipe(dev_priv, pipe) {
         if (!intel_display_power_enabled(dev_priv,
                          POWER_DOMAIN_PIPE(pipe))) {
             seq_printf(m, "Pipe %c power disabled\n",
@@ -749,7 +763,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                I915_READ(VLV_IIR_RW));
         seq_printf(m, "Display IMR:\t%08x\n",
                I915_READ(VLV_IMR));
-        for_each_pipe(pipe)
+        for_each_pipe(dev_priv, pipe)
             seq_printf(m, "Pipe %c stat:\t%08x\n",
                    pipe_name(pipe),
                    I915_READ(PIPESTAT(pipe)));
@@ -785,7 +799,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
                I915_READ(IIR));
         seq_printf(m, "Interrupt mask: %08x\n",
                I915_READ(IMR));
-        for_each_pipe(pipe)
+        for_each_pipe(dev_priv, pipe)
             seq_printf(m, "Pipe %c stat: %08x\n",
                    pipe_name(pipe),
                    I915_READ(PIPESTAT(pipe)));
@@ -933,7 +947,7 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
     ssize_t ret_count = 0;
     int ret;

-    ret = i915_error_state_buf_init(&error_str, count, *pos);
+    ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
     if (ret)
         return ret;
@@ -1030,6 +1044,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
         u32 rpstat, cagf, reqf;
         u32 rpupei, rpcurup, rpprevup;
         u32 rpdownei, rpcurdown, rpprevdown;
+        u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
         int max_freq;

         /* RPSTAT1 is in the GT power well */
@@ -1067,12 +1082,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
         gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
         mutex_unlock(&dev->struct_mutex);

+        if (IS_GEN6(dev) || IS_GEN7(dev)) {
+            pm_ier = I915_READ(GEN6_PMIER);
+            pm_imr = I915_READ(GEN6_PMIMR);
+            pm_isr = I915_READ(GEN6_PMISR);
+            pm_iir = I915_READ(GEN6_PMIIR);
+            pm_mask = I915_READ(GEN6_PMINTRMSK);
+        } else {
+            pm_ier = I915_READ(GEN8_GT_IER(2));
+            pm_imr = I915_READ(GEN8_GT_IMR(2));
+            pm_isr = I915_READ(GEN8_GT_ISR(2));
+            pm_iir = I915_READ(GEN8_GT_IIR(2));
+            pm_mask = I915_READ(GEN6_PMINTRMSK);
+        }
         seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
-               I915_READ(GEN6_PMIER),
-               I915_READ(GEN6_PMIMR),
-               I915_READ(GEN6_PMISR),
-               I915_READ(GEN6_PMIIR),
-               I915_READ(GEN6_PMINTRMSK));
+               pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
         seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
         seq_printf(m, "Render p-state ratio: %d\n",
                (gt_perf_status & 0xff00) >> 8);
@@ -1371,7 +1395,7 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
     if (IS_VALLEYVIEW(dev))
         return vlv_drpc_info(m);
-    else if (IS_GEN6(dev) || IS_GEN7(dev))
+    else if (INTEL_INFO(dev)->gen >= 6)
         return gen6_drpc_info(m);
     else
         return ironlake_drpc_info(m);
@@ -2618,6 +2642,40 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
     return 0;
 }

+static int i915_wa_registers(struct seq_file *m, void *unused)
+{
+    int i;
+    int ret;
+    struct drm_info_node *node = (struct drm_info_node *) m->private;
+    struct drm_device *dev = node->minor->dev;
+    struct drm_i915_private *dev_priv = dev->dev_private;
+
+    ret = mutex_lock_interruptible(&dev->struct_mutex);
+    if (ret)
+        return ret;
+
+    intel_runtime_pm_get(dev_priv);
+
+    seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs);
+    for (i = 0; i < dev_priv->num_wa_regs; ++i) {
+        u32 addr, mask;
+
+        addr = dev_priv->intel_wa_regs[i].addr;
+        mask = dev_priv->intel_wa_regs[i].mask;
+        dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask;
+        if (dev_priv->intel_wa_regs[i].addr)
+            seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
+                   dev_priv->intel_wa_regs[i].addr,
+                   dev_priv->intel_wa_regs[i].value,
+                   dev_priv->intel_wa_regs[i].mask);
+    }
+
+    intel_runtime_pm_put(dev_priv);
+    mutex_unlock(&dev->struct_mutex);
+
+    return 0;
+}
+
 struct pipe_crc_info {
     const char *name;
     struct drm_device *dev;
@@ -3769,8 +3827,6 @@ i915_drop_caches_set(void *data, u64 val)
     struct drm_device *dev = data;
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct drm_i915_gem_object *obj, *next;
-    struct i915_address_space *vm;
-    struct i915_vma *vma, *x;
     int ret;

     DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -3791,16 +3847,23 @@ i915_drop_caches_set(void *data, u64 val)
         i915_gem_retire_requests(dev);

     if (val & DROP_BOUND) {
-        list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-            list_for_each_entry_safe(vma, x, &vm->inactive_list,
-                         mm_list) {
+        list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
+                     global_list) {
+            struct i915_vma *vma, *v;
+
+            ret = 0;
+            drm_gem_object_reference(&obj->base);
+            list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) {
                 if (vma->pin_count)
                     continue;
                 ret = i915_vma_unbind(vma);
                 if (ret)
-                    goto unlock;
+                    break;
             }
+            drm_gem_object_unreference(&obj->base);
+            if (ret)
+                goto unlock;
         }
     }
@@ -4149,6 +4212,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
     {"i915_semaphore_status", i915_semaphore_status, 0},
     {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
     {"i915_dp_mst_info", i915_dp_mst_info, 0},
+    {"i915_wa_registers", i915_wa_registers, 0},
 };
 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -4178,7 +4242,7 @@ void intel_display_crc_init(struct drm_device *dev)
     struct drm_i915_private *dev_priv = dev->dev_private;
     enum pipe pipe;

-    for_each_pipe(pipe) {
+    for_each_pipe(dev_priv, pipe) {
         struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

         pipe_crc->opened = false;
...
@@ -28,6 +28,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

+#include <linux/async.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_helper.h>
@@ -1381,7 +1382,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
      * scanning against hotplug events. Hence do this first and ignore the
      * tiny window where we will loose hotplug notifactions.
      */
-    intel_fbdev_initial_config(dev);
+    async_schedule(intel_fbdev_initial_config, dev_priv);

     drm_kms_helper_poll_init(dev);
@@ -1534,10 +1535,10 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
     info = (struct intel_device_info *)&dev_priv->info;

     if (IS_VALLEYVIEW(dev))
-        for_each_pipe(pipe)
+        for_each_pipe(dev_priv, pipe)
             info->num_sprites[pipe] = 2;
     else
-        for_each_pipe(pipe)
+        for_each_pipe(dev_priv, pipe)
             info->num_sprites[pipe] = 1;

     if (i915.disable_display) {
...
@@ -844,7 +844,13 @@ int i915_reset(struct drm_device *dev)
             !dev_priv->ums.mm_suspended) {
         dev_priv->ums.mm_suspended = 0;

+        /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
+        dev_priv->gpu_error.reload_in_reset = true;
+
         ret = i915_gem_init_hw(dev);
+
+        dev_priv->gpu_error.reload_in_reset = false;
+
         mutex_unlock(&dev->struct_mutex);
         if (ret) {
             DRM_ERROR("Failed hw init on reset %d\n", ret);
@@ -1456,13 +1462,29 @@ static int intel_runtime_suspend(struct device *device)
     dev_priv->pm.suspended = true;

     /*
-     * current versions of firmware which depend on this opregion
-     * notification have repurposed the D1 definition to mean
-     * "runtime suspended" vs. what you would normally expect (D3)
-     * to distinguish it from notifications that might be sent
-     * via the suspend path.
+     * FIXME: We really should find a document that references the arguments
+     * used below!
      */
-    intel_opregion_notify_adapter(dev, PCI_D1);
+    if (IS_HASWELL(dev)) {
+        /*
+         * current versions of firmware which depend on this opregion
+         * notification have repurposed the D1 definition to mean
+         * "runtime suspended" vs. what you would normally expect (D3)
+         * to distinguish it from notifications that might be sent via
+         * the suspend path.
+         */
+        intel_opregion_notify_adapter(dev, PCI_D1);
+    } else {
+        /*
+         * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
+         * being detected, and the call we do at intel_runtime_resume()
+         * won't be able to restore them. Since PCI_D3hot matches the
+         * actual specification and appears to be working, use it. Let's
+         * assume the other non-Haswell platforms will stay the same as
+         * Broadwell.
+         */
+        intel_opregion_notify_adapter(dev, PCI_D3hot);
+    }

     DRM_DEBUG_KMS("Device suspended\n");
     return 0;
@@ -1685,6 +1707,8 @@ static void __exit i915_exit(void)
 module_init(i915_init);
 module_exit(i915_exit);

-MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_AUTHOR("Tungsten Graphics, Inc.");
+MODULE_AUTHOR("Intel Corporation");
+
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL and additional rights");
@@ -37,6 +37,7 @@
 #include "intel_ringbuffer.h"
 #include "intel_lrc.h"
 #include "i915_gem_gtt.h"
+#include "i915_gem_render_state.h"
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
@@ -51,11 +52,9 @@
 /* General customization:
  */

-#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
-
 #define DRIVER_NAME "i915"
 #define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20140822"
+#define DRIVER_DATE "20140905"

 enum pipe {
     INVALID_PIPE = -1,
@@ -164,7 +163,10 @@ enum hpd_pin {
     I915_GEM_DOMAIN_INSTRUCTION | \
     I915_GEM_DOMAIN_VERTEX)

-#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
+#define for_each_pipe(__dev_priv, __p) \
+    for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+#define for_each_plane(pipe, p) \
+    for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++)
 #define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)

 #define for_each_crtc(dev, crtc) \
@@ -639,6 +641,7 @@ struct intel_context {
     } legacy_hw_ctx;

     /* Execlists */
+    bool rcs_initialized;
     struct {
         struct drm_i915_gem_object *state;
         struct intel_ringbuffer *ringbuf;
@@ -712,6 +715,7 @@ enum intel_sbi_destination {
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
+#define QUIRK_PIPEB_FORCE (1<<4)

 struct intel_fbdev;
 struct intel_fbc_work;
@@ -941,6 +945,23 @@ struct intel_rps_ei {
     u32 media_c0;
 };

+struct intel_rps_bdw_cal {
+    u32 it_threshold_pct; /* interrupt, in percentage */
+    u32 eval_interval; /* evaluation interval, in us */
+    u32 last_ts;
+    u32 last_c0;
+    bool is_up;
+};
+
+struct intel_rps_bdw_turbo {
+    struct intel_rps_bdw_cal up;
+    struct intel_rps_bdw_cal down;
+    struct timer_list flip_timer;
+    u32 timeout;
+    atomic_t flip_received;
+    struct work_struct work_max_freq;
+};
+
 struct intel_gen6_power_mgmt {
     /* work and pm_iir are protected by dev_priv->irq_lock */
     struct work_struct work;
@@ -974,6 +995,9 @@ struct intel_gen6_power_mgmt {
     bool enabled;
     struct delayed_work delayed_resume_work;

+    bool is_bdw_sw_turbo; /* Switch of BDW software turbo */
+    struct intel_rps_bdw_turbo sw_turbo; /* Calculate RP interrupt timing */
+
     /* manual wa residency calculations */
     struct intel_rps_ei up_ei, down_ei;
@@ -1171,6 +1195,7 @@ struct i915_gem_mm {
 };

 struct drm_i915_error_state_buf {
+    struct drm_i915_private *i915;
     unsigned bytes;
     unsigned size;
     int err;
@@ -1243,6 +1268,9 @@ struct i915_gpu_error {
     /* For missed irq/seqno simulation. */
     unsigned int test_irq_rings;
+
+    /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
+    bool reload_in_reset;
 };

 enum modeset_restore {
@@ -1505,6 +1533,9 @@ struct drm_i915_private {
     /* LVDS info */
     bool no_aux_handshake;

+    /* protects panel power sequencer state */
+    struct mutex pps_mutex;
+
     struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
     int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
     int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -1556,6 +1587,20 @@ struct drm_i915_private {
     struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
     int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

+    /*
+     * workarounds are currently applied at different places and
+     * changes are being done to consolidate them so exact count is
+     * not clear at this point, use a max value for now.
+     */
+#define I915_MAX_WA_REGS 16
+    struct {
+        u32 addr;
+        u32 value;
+        /* bitmask representing WA bits */
+        u32 mask;
+    } intel_wa_regs[I915_MAX_WA_REGS];
+    u32 num_wa_regs;
+
     /* Reclocking support */
     bool render_reclock_avail;
     bool lvds_downclock_avail;
@@ -1639,6 +1684,8 @@ struct drm_i915_private {
      */
     struct workqueue_struct *dp_wq;

+    uint32_t bios_vgacntr;
+
     /* Old dri1 support infrastructure, beware the dragons ya fools entering
      * here! */
     struct i915_dri1_state dri1;
@@ -2596,8 +2643,6 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file);

-/* i915_gem_render_state.c */
-int i915_gem_render_state_init(struct intel_engine_cs *ring);
-
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev,
                       struct i915_address_space *vm,
@@ -2665,6 +2710,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
                 const struct i915_error_state_file_priv *error);
 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
+                  struct drm_i915_private *i915,
                   size_t count, loff_t pos);
 static inline void i915_error_state_buf_release(
     struct drm_i915_error_state_buf *eb)
@@ -2679,7 +2725,7 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
 void i915_destroy_error_state(struct drm_device *dev);

 void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
-const char *i915_cache_level_str(int type);
+const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

 /* i915_cmd_parser.c */
 int i915_cmd_parser_get_version(void);
@@ -2771,10 +2817,13 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
 extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void gen8_fbc_sw_flush(struct drm_device *dev, u32 value);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
+extern void bdw_software_turbo(struct drm_device *dev);
+extern void gen8_flip_interrupt(struct drm_device *dev);
 extern void valleyview_set_rps(struct drm_device *dev, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
                   bool enable);
...
@@ -1085,7 +1085,13 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
         if (i915_terminally_wedged(error))
             return -EIO;

-        return -EAGAIN;
+        /*
+         * Check if GPU Reset is in progress - we need intel_ring_begin
+         * to work properly to reinit the hw state while the gpu is
+         * still marked as reset-in-progress. Handle this with a flag.
+         */
+        if (!error->reload_in_reset)
+            return -EAGAIN;
     }

     return 0;
@@ -2982,9 +2988,11 @@ int i915_gpu_idle(struct drm_device *dev)
     /* Flush everything onto the inactive list. */
     for_each_ring(ring, dev_priv, i) {
-        ret = i915_switch_context(ring, ring->default_context);
-        if (ret)
-            return ret;
+        if (!i915.enable_execlists) {
+            ret = i915_switch_context(ring, ring->default_context);
+            if (ret)
+                return ret;
+        }

         ret = intel_ring_idle(ring);
         if (ret)
@@ -4658,11 +4666,46 @@ intel_enable_blt(struct drm_device *dev)
     return true;
 }

+static void init_unused_ring(struct drm_device *dev, u32 base)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+
+    I915_WRITE(RING_CTL(base), 0);
+    I915_WRITE(RING_HEAD(base), 0);
+    I915_WRITE(RING_TAIL(base), 0);
+    I915_WRITE(RING_START(base), 0);
+}
+
+static void init_unused_rings(struct drm_device *dev)
+{
+    if (IS_I830(dev)) {
+        init_unused_ring(dev, PRB1_BASE);
+        init_unused_ring(dev, SRB0_BASE);
+        init_unused_ring(dev, SRB1_BASE);
+        init_unused_ring(dev, SRB2_BASE);
+        init_unused_ring(dev, SRB3_BASE);
+    } else if (IS_GEN2(dev)) {
+        init_unused_ring(dev, SRB0_BASE);
+        init_unused_ring(dev, SRB1_BASE);
+    } else if (IS_GEN3(dev)) {
+        init_unused_ring(dev, PRB1_BASE);
+        init_unused_ring(dev, PRB2_BASE);
+    }
+}
+
 int i915_gem_init_rings(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
     int ret;

+    /*
+     * At least 830 can leave some of the unused rings
+     * "active" (ie. head != tail) after resume which
+     * will prevent c3 entry. Makes sure all unused rings
+     * are totally idle.
+     */
+    init_unused_rings(dev);
+
     ret = intel_init_render_ring_buffer(dev);
     if (ret)
         return ret;
...
@@ -289,34 +289,23 @@ void i915_gem_context_reset(struct drm_device *dev)
     struct drm_i915_private *dev_priv = dev->dev_private;
     int i;

-    /* Prevent the hardware from restoring the last context (which hung) on
-     * the next switch */
+    /* In execlists mode we will unreference the context when the execlist
+     * queue is cleared and the requests destroyed.
+     */
+    if (i915.enable_execlists)
+        return;
+
     for (i = 0; i < I915_NUM_RINGS; i++) {
         struct intel_engine_cs *ring = &dev_priv->ring[i];
-        struct intel_context *dctx = ring->default_context;
         struct intel_context *lctx = ring->last_context;

-        /* Do a fake switch to the default context */
-        if (lctx == dctx)
-            continue;
-
-        if (!lctx)
-            continue;
-
-        if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
-            WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
-                              get_context_alignment(dev), 0));
-            /* Fake a finish/inactive */
-            dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
-            dctx->legacy_hw_ctx.rcs_state->active = 0;
+        if (lctx) {
+            if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+                i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
+
+            i915_gem_context_unreference(lctx);
+            ring->last_context = NULL;
         }
-
-        if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
-            i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
-
-        i915_gem_context_unreference(lctx);
-        i915_gem_context_reference(dctx);
-        ring->last_context = dctx;
     }
 }
@@ -412,12 +401,11 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
     struct intel_engine_cs *ring;
     int ret, i;

-    /* FIXME: We should make this work, even in reset */
-    if (i915_reset_in_progress(&dev_priv->gpu_error))
-        return 0;
-
     BUG_ON(!dev_priv->ring[RCS].default_context);

+    if (i915.enable_execlists)
+        return 0;
+
     for_each_ring(ring, dev_priv, i) {
         ret = i915_switch_context(ring, ring->default_context);
         if (ret)
@@ -479,6 +467,7 @@ mi_set_context(struct intel_engine_cs *ring,
            struct intel_context *new_context,
            u32 hw_flags)
 {
+    u32 flags = hw_flags | MI_MM_SPACE_GTT;
     int ret;

     /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
@@ -492,6 +481,10 @@ mi_set_context(struct intel_engine_cs *ring,
             return ret;
     }

+    /* These flags are for resource streamer on HSW+ */
+    if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
+        flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
+
     ret = intel_ring_begin(ring, 6);
     if (ret)
         return ret;
@@ -505,10 +498,7 @@ mi_set_context(struct intel_engine_cs *ring,
     intel_ring_emit(ring, MI_NOOP);
     intel_ring_emit(ring, MI_SET_CONTEXT);
     intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
-            MI_MM_SPACE_GTT |
-            MI_SAVE_EXT_STATE_EN |
-            MI_RESTORE_EXT_STATE_EN |
-            hw_flags);
+            flags);
     /*
      * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
      * WaMiSetContext_Hang:snb,ivb,vlv
@@ -558,7 +548,7 @@ static int do_switch(struct intel_engine_cs *ring,
     from = ring->last_context;

     if (to->ppgtt) {
-        ret = to->ppgtt->switch_mm(to->ppgtt, ring, false);
+        ret = to->ppgtt->switch_mm(to->ppgtt, ring);
         if (ret)
             goto unpin_out;
     }
@@ -638,6 +628,12 @@ static int do_switch(struct intel_engine_cs *ring,
     ring->last_context = to;

     if (uninitialized) {
+        if (ring->init_context) {
+            ret = ring->init_context(ring);
+            if (ret)
+                DRM_ERROR("ring init context: %d\n", ret);
+        }
+
         ret = i915_gem_render_state_init(ring);
         if (ret)
             DRM_ERROR("init render state: %d\n", ret);
@@ -658,14 +654,19 @@ static int do_switch(struct intel_engine_cs *ring,
  *
  * The context life cycle is simple. The context refcount is incremented and
  * decremented by 1 and create and destroy. If the context is in use by the GPU,
- * it will have a refoucnt > 1. This allows us to destroy the context abstract
+ * it will have a refcount > 1. This allows us to destroy the context abstract
  * object while letting the normal object tracking destroy the backing BO.
+ *
+ * This function should not be used in execlists mode. Instead the context is
+ * switched by writing to the ELSP and requests keep a reference to their
+ * context.
  */
 int i915_switch_context(struct intel_engine_cs *ring,
             struct intel_context *to)
 {
     struct drm_i915_private *dev_priv = ring->dev->dev_private;

+    WARN_ON(i915.enable_execlists);
     WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

     if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
...
@@ -204,19 +204,12 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 /* Broadwell Page Directory Pointer Descriptors */
 static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
-              uint64_t val, bool synchronous)
+              uint64_t val)
 {
-    struct drm_i915_private *dev_priv = ring->dev->dev_private;
     int ret;

     BUG_ON(entry >= 4);

-    if (synchronous) {
-        I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
-        I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
-        return 0;
-    }
-
     ret = intel_ring_begin(ring, 6);
     if (ret)
         return ret;
@@ -233,8 +226,7 @@ static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
 }

 static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
-              struct intel_engine_cs *ring,
-              bool synchronous)
+              struct intel_engine_cs *ring)
 {
     int i, ret;
@@ -243,7 +235,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
     for (i = used_pd - 1; i >= 0; i--) {
         dma_addr_t addr = ppgtt->pd_dma_addr[i];

-        ret = gen8_write_pdp(ring, i, addr, synchronous);
+        ret = gen8_write_pdp(ring, i, addr);
         if (ret)
             return ret;
     }
@@ -708,29 +700,10 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 }

 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
-             struct intel_engine_cs *ring,
-             bool synchronous)
+             struct intel_engine_cs *ring)
 {
-    struct drm_device *dev = ppgtt->base.dev;
-    struct drm_i915_private *dev_priv = dev->dev_private;
     int ret;

-    /* If we're in reset, we can assume the GPU is sufficiently idle to
-     * manually frob these bits. Ideally we could use the ring functions,
-     * except our error handling makes it quite difficult (can't use
-     * intel_ring_begin, ring->flush, or intel_ring_advance)
-     *
-     * FIXME: We should try not to special case reset
-     */
-    if (synchronous ||
-        i915_reset_in_progress(&dev_priv->gpu_error)) {
-        WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
-        I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-        I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
-        POSTING_READ(RING_PP_DIR_BASE(ring));
-        return 0;
-    }
-
     /* NB: TLBs must be flushed and invalidated before a switch */
     ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
     if (ret)
@@ -752,29 +725,10 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 }

 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
-              struct intel_engine_cs *ring,
-              bool synchronous)
+              struct intel_engine_cs *ring)
 {
-    struct drm_device *dev = ppgtt->base.dev;
-    struct drm_i915_private *dev_priv = dev->dev_private;
     int ret;

-    /* If we're in reset, we can assume the GPU is sufficiently idle to
-     * manually frob these bits. Ideally we could use the ring functions,
-     * except our error handling makes it quite difficult (can't use
-     * intel_ring_begin, ring->flush, or intel_ring_advance)
-     *
-     * FIXME: We should try not to special case reset
-     */
-    if (synchronous ||
-        i915_reset_in_progress(&dev_priv->gpu_error)) {
-        WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
-        I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-        I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
-        POSTING_READ(RING_PP_DIR_BASE(ring));
-        return 0;
-    }
-
     /* NB: TLBs must be flushed and invalidated before a switch */
     ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
     if (ret)
@@ -803,14 +757,11 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 }

 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
-              struct intel_engine_cs *ring,
-              bool synchronous)
+              struct intel_engine_cs *ring)
 {
     struct drm_device *dev = ppgtt->base.dev;
     struct drm_i915_private *dev_priv = dev->dev_private;

-    if (!synchronous)
-        return 0;
-
     I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
     I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
@@ -826,12 +777,6 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
     struct intel_engine_cs *ring;
     int j;

-    /* In the case of execlists, PPGTT is enabled by the context descriptor
-     * and the PDPs are contained within the context itself. We don't
-     * need to do anything here. */
-    if (i915.enable_execlists)
-        return;
-
     for_each_ring(ring, dev_priv, j) {
         I915_WRITE(RING_MODE_GEN7(ring),
                _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
@@ -1175,6 +1120,12 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
     struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
     int i, ret = 0;

+    /* In the case of execlists, PPGTT is enabled by the context descriptor
+     * and the PDPs are contained within the context itself. We don't
+     * need to do anything here. */
+    if (i915.enable_execlists)
+        return 0;
+
     if (!USES_PPGTT(dev))
         return 0;
@@ -1189,7 +1140,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
     if (ppgtt) {
         for_each_ring(ring, dev_priv, i) {
-            ret = ppgtt->switch_mm(ppgtt, ring, true);
+            ret = ppgtt->switch_mm(ppgtt, ring);
             if (ret != 0)
                 return ret;
         }
@@ -2190,8 +2141,10 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
     /* Keep GGTT vmas first to make debug easier */
     if (i915_is_ggtt(vm))
         list_add(&vma->vma_link, &obj->vma_list);
-    else
+    else {
         list_add_tail(&vma->vma_link, &obj->vma_list);
+        i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+    }

     return vma;
 }
@@ -2206,8 +2159,5 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
     if (!vma)
         vma = __i915_gem_vma_create(obj, vm);

-    if (!i915_is_ggtt(vm))
-        i915_ppgtt_get(i915_vm_to_ppgtt(vm));
-
     return vma;
 }
@@ -264,8 +264,7 @@ struct i915_hw_ppgtt {
     int (*enable)(struct i915_hw_ppgtt *ppgtt);
     int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
-             struct intel_engine_cs *ring,
-             bool synchronous);
+             struct intel_engine_cs *ring);
     void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
 };
...
@@ -28,13 +28,6 @@
 #include "i915_drv.h"
 #include "intel_renderstate.h"

-struct render_state {
-    const struct intel_renderstate_rodata *rodata;
-    struct drm_i915_gem_object *obj;
-    u64 ggtt_offset;
-    int gen;
-};
-
 static const struct intel_renderstate_rodata *
 render_state_get_rodata(struct drm_device *dev, const int gen)
 {
@@ -127,30 +120,47 @@ static int render_state_setup(struct render_state *so)
     return 0;
 }

-static void render_state_fini(struct render_state *so)
+void i915_gem_render_state_fini(struct render_state *so)
 {
     i915_gem_object_ggtt_unpin(so->obj);
     drm_gem_object_unreference(&so->obj->base);
 }

-int i915_gem_render_state_init(struct intel_engine_cs *ring)
+int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+                  struct render_state *so)
 {
-    struct render_state so;
     int ret;

     if (WARN_ON(ring->id != RCS))
         return -ENOENT;

-    ret = render_state_init(&so, ring->dev);
+    ret = render_state_init(so, ring->dev);
     if (ret)
         return ret;

-    if (so.rodata == NULL)
+    if (so->rodata == NULL)
         return 0;

-    ret = render_state_setup(&so);
+    ret = render_state_setup(so);
+    if (ret) {
+        i915_gem_render_state_fini(so);
+        return ret;
+    }
+
+    return 0;
+}
+
+int i915_gem_render_state_init(struct intel_engine_cs *ring)
+{
+    struct render_state so;
+    int ret;
+
+    ret = i915_gem_render_state_prepare(ring, &so);
     if (ret)
-        goto out;
+        return ret;
+
+    if (so.rodata == NULL)
+        return 0;

     ret = ring->dispatch_execbuffer(ring,
                     so.ggtt_offset,
@@ -164,6 +174,6 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
     ret = __i915_add_request(ring, NULL, so.obj, NULL);
     /* __i915_add_request moves object to inactive if it fails */
 out:
-    render_state_fini(&so);
+    i915_gem_render_state_fini(&so);
     return ret;
 }
/*
* Copyright © 2014 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _I915_GEM_RENDER_STATE_H_
#define _I915_GEM_RENDER_STATE_H_
#include <linux/types.h>
struct intel_renderstate_rodata {
const u32 *reloc;
const u32 *batch;
const u32 batch_items;
};
struct render_state {
const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
u64 ggtt_offset;
int gen;
};
int i915_gem_render_state_init(struct intel_engine_cs *ring);
void i915_gem_render_state_fini(struct render_state *so);
int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
struct render_state *so);
#endif /* _I915_GEM_RENDER_STATE_H_ */
@@ -91,7 +91,14 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
     uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
     uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

-    if (IS_VALLEYVIEW(dev)) {
+    if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
+        /*
+         * On BDW+, swizzling is not used. We leave the CPU memory
+         * controller in charge of optimizing memory accesses without
+         * the extra address manipulation GPU side.
+         *
+         * VLV and CHV don't have GPU swizzling.
+         */
         swizzle_x = I915_BIT_6_SWIZZLE_NONE;
         swizzle_y = I915_BIT_6_SWIZZLE_NONE;
     } else if (INTEL_INFO(dev)->gen >= 6) {
...
@@ -208,7 +208,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
         err_puts(m, err->userptr ? " userptr" : "");
         err_puts(m, err->ring != -1 ? " " : "");
         err_puts(m, ring_str(err->ring));
-        err_puts(m, i915_cache_level_str(err->cache_level));
+        err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

         if (err->name)
             err_printf(m, " (name: %d)", err->name);
@@ -494,9 +494,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 }

 int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
+                  struct drm_i915_private *i915,
                   size_t count, loff_t pos)
 {
     memset(ebuf, 0, sizeof(*ebuf));
+    ebuf->i915 = i915;

     /* We need to have enough room to store any i915_error_state printf
      * so that we can move it to start position.
@@ -558,24 +560,54 @@ static void i915_error_state_free(struct kref *error_ref)
 }

 static struct drm_i915_error_object *
-i915_error_object_create_sized(struct drm_i915_private *dev_priv,
-                   struct drm_i915_gem_object *src,
-                   struct i915_address_space *vm,
-                   const int num_pages)
+i915_error_object_create(struct drm_i915_private *dev_priv,
+             struct drm_i915_gem_object *src,
+             struct i915_address_space *vm)
 {
     struct drm_i915_error_object *dst;
-    int i;
+    int num_pages;
+    bool use_ggtt;
+    int i = 0;
     u32 reloc_offset;

     if (src == NULL || src->pages == NULL)
         return NULL;

+    num_pages = src->base.size >> PAGE_SHIFT;
+
     dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
     if (dst == NULL)
         return NULL;

-    reloc_offset = dst->gtt_offset = i915_gem_obj_offset(src, vm);
-    for (i = 0; i < num_pages; i++) {
+    if (i915_gem_obj_bound(src, vm))
+        dst->gtt_offset = i915_gem_obj_offset(src, vm);
+    else
+        dst->gtt_offset = -1;
+
+    reloc_offset = dst->gtt_offset;
+    use_ggtt = (src->cache_level == I915_CACHE_NONE &&
+            i915_is_ggtt(vm) &&
+            src->has_global_gtt_mapping &&
+            reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
+
+    /* Cannot access stolen address directly, try to use the aperture */
+    if (src->stolen) {
+        use_ggtt = true;
+
+        if (!src->has_global_gtt_mapping)
+            goto unwind;
+
+        reloc_offset = i915_gem_obj_ggtt_offset(src);
+        if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
+            goto unwind;
+    }
+
+    /* Cannot access snooped pages through the aperture */
+    if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
+        goto unwind;
+
+    dst->page_count = num_pages;
+    while (num_pages--) {
         unsigned long flags;
         void *d;
...@@ -584,10 +616,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, ...@@ -584,10 +616,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
goto unwind; goto unwind;
local_irq_save(flags); local_irq_save(flags);
if (src->cache_level == I915_CACHE_NONE && if (use_ggtt) {
reloc_offset < dev_priv->gtt.mappable_end &&
src->has_global_gtt_mapping &&
i915_is_ggtt(vm)) {
void __iomem *s; void __iomem *s;
/* Simply ignore tiling or any overlapping fence. /* Simply ignore tiling or any overlapping fence.
...@@ -599,14 +628,6 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, ...@@ -599,14 +628,6 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
reloc_offset); reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE); memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s); io_mapping_unmap_atomic(s);
} else if (src->stolen) {
unsigned long offset;
offset = dev_priv->mm.stolen_base;
offset += src->stolen->start;
offset += i << PAGE_SHIFT;
memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
} else { } else {
struct page *page; struct page *page;
void *s; void *s;
...@@ -623,11 +644,9 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, ...@@ -623,11 +644,9 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
} }
local_irq_restore(flags); local_irq_restore(flags);
dst->pages[i] = d; dst->pages[i++] = d;
reloc_offset += PAGE_SIZE; reloc_offset += PAGE_SIZE;
} }
dst->page_count = num_pages;
return dst; return dst;
...@@ -637,13 +656,8 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, ...@@ -637,13 +656,8 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
kfree(dst); kfree(dst);
return NULL; return NULL;
} }
#define i915_error_object_create(dev_priv, src, vm) \
i915_error_object_create_sized((dev_priv), (src), (vm), \
(src)->base.size>>PAGE_SHIFT)
#define i915_error_ggtt_object_create(dev_priv, src) \ #define i915_error_ggtt_object_create(dev_priv, src) \
i915_error_object_create_sized((dev_priv), (src), &(dev_priv)->gtt.base, \ i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
(src)->base.size>>PAGE_SHIFT)
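
The reworked i915_error_object_create() above decides up front whether the object can be dumped through the GGTT aperture (uncached, globally mapped and inside the mappable window, or backed by stolen memory) and otherwise falls back to reading the CPU pages. A hedged, simplified sketch of that predicate follows; the real code also switches the copy offset to the object's GGTT offset for stolen memory, which is folded into a single offset here:

#include <stdbool.h>

#define PAGE_SIZE 4096UL

/* Simplified stand-ins for the GEM object / VM state used by the driver. */
struct obj_state {
        bool uncached;          /* cache_level == I915_CACHE_NONE */
        bool global_gtt_mapped; /* has_global_gtt_mapping */
        bool stolen;            /* backed by stolen memory */
};

/* Returns true when the pages should be copied via the GGTT aperture;
 * *fail is set when the object cannot be captured at all. */
static bool capture_via_ggtt(const struct obj_state *obj, bool vm_is_ggtt,
                             bool has_llc, unsigned long gtt_offset,
                             unsigned long num_pages,
                             unsigned long mappable_end, bool *fail)
{
        bool use_ggtt = obj->uncached && vm_is_ggtt &&
                        obj->global_gtt_mapped &&
                        gtt_offset + num_pages * PAGE_SIZE <= mappable_end;

        *fail = false;

        /* Stolen memory is only reachable through the aperture. */
        if (obj->stolen) {
                use_ggtt = true;
                if (!obj->global_gtt_mapped ||
                    gtt_offset + num_pages * PAGE_SIZE > mappable_end)
                        *fail = true;
        }

        /* Snooped pages cannot be read through the aperture on non-LLC parts. */
        if (use_ggtt && !obj->uncached && !has_llc)
                *fail = true;

        return use_ggtt;
}

int main(void)
{
        struct obj_state obj = { .uncached = true, .global_gtt_mapped = true };
        bool fail;

        return capture_via_ggtt(&obj, true, true, 0, 4,
                                256UL << 20, &fail) ? 0 : 1;
}
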
static void capture_bo(struct drm_i915_error_buffer *err, static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma) struct i915_vma *vma)
...@@ -900,9 +914,6 @@ static void i915_record_ring_state(struct drm_device *dev, ...@@ -900,9 +914,6 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->hws = I915_READ(mmio); ering->hws = I915_READ(mmio);
} }
ering->cpu_ring_head = ring->buffer->head;
ering->cpu_ring_tail = ring->buffer->tail;
ering->hangcheck_score = ring->hangcheck.score; ering->hangcheck_score = ring->hangcheck.score;
ering->hangcheck_action = ring->hangcheck.action; ering->hangcheck_action = ring->hangcheck.action;
...@@ -965,6 +976,7 @@ static void i915_gem_record_rings(struct drm_device *dev, ...@@ -965,6 +976,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
for (i = 0; i < I915_NUM_RINGS; i++) { for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i]; struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_ringbuffer *rbuf;
error->ring[i].pid = -1; error->ring[i].pid = -1;
...@@ -992,8 +1004,7 @@ static void i915_gem_record_rings(struct drm_device *dev, ...@@ -992,8 +1004,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
request->batch_obj, request->batch_obj,
vm); vm);
if (HAS_BROKEN_CS_TLB(dev_priv->dev) && if (HAS_BROKEN_CS_TLB(dev_priv->dev))
ring->scratch.obj)
error->ring[i].wa_batchbuffer = error->ring[i].wa_batchbuffer =
i915_error_ggtt_object_create(dev_priv, i915_error_ggtt_object_create(dev_priv,
ring->scratch.obj); ring->scratch.obj);
...@@ -1012,12 +1023,27 @@ static void i915_gem_record_rings(struct drm_device *dev, ...@@ -1012,12 +1023,27 @@ static void i915_gem_record_rings(struct drm_device *dev,
} }
} }
if (i915.enable_execlists) {
/* TODO: This is only a small fix to keep basic error
* capture working, but we need to add more information
* for it to be useful (e.g. dump the context being
* executed).
*/
if (request)
rbuf = request->ctx->engine[ring->id].ringbuf;
else
rbuf = ring->default_context->engine[ring->id].ringbuf;
} else
rbuf = ring->buffer;
error->ring[i].cpu_ring_head = rbuf->head;
error->ring[i].cpu_ring_tail = rbuf->tail;
error->ring[i].ringbuffer = error->ring[i].ringbuffer =
i915_error_ggtt_object_create(dev_priv, ring->buffer->obj); i915_error_ggtt_object_create(dev_priv, rbuf->obj);
if (ring->status_page.obj) error->ring[i].hws_page =
error->ring[i].hws_page = i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
i915_gem_record_active_context(ring, error, &error->ring[i]); i915_gem_record_active_context(ring, error, &error->ring[i]);
...@@ -1331,11 +1357,11 @@ void i915_destroy_error_state(struct drm_device *dev) ...@@ -1331,11 +1357,11 @@ void i915_destroy_error_state(struct drm_device *dev)
kref_put(&error->ref, i915_error_state_free); kref_put(&error->ref, i915_error_state_free);
} }
const char *i915_cache_level_str(int type) const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{ {
switch (type) { switch (type) {
case I915_CACHE_NONE: return " uncached"; case I915_CACHE_NONE: return " uncached";
case I915_CACHE_LLC: return " snooped or LLC"; case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
case I915_CACHE_L3_LLC: return " L3+LLC"; case I915_CACHE_L3_LLC: return " L3+LLC";
case I915_CACHE_WT: return " WT"; case I915_CACHE_WT: return " WT";
default: return ""; default: return "";
......
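
i915_cache_level_str() gaining a device argument lets I915_CACHE_LLC be reported as " LLC" on LLC machines and " snooped" elsewhere, instead of the old ambiguous " snooped or LLC". The same idea as a standalone helper:

#include <stdbool.h>
#include <stdio.h>

enum cache_level { CACHE_NONE, CACHE_LLC, CACHE_L3_LLC, CACHE_WT };

static const char *cache_level_str(bool has_llc, enum cache_level type)
{
        switch (type) {
        case CACHE_NONE:   return " uncached";
        case CACHE_LLC:    return has_llc ? " LLC" : " snooped";
        case CACHE_L3_LLC: return " L3+LLC";
        case CACHE_WT:     return " WT";
        default:           return "";
        }
}

int main(void)
{
        printf("llc part:%s, non-llc part:%s\n",
               cache_level_str(true, CACHE_LLC),
               cache_level_str(false, CACHE_LLC));
        return 0;
}
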
...@@ -238,7 +238,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev) ...@@ -238,7 +238,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock); assert_spin_locked(&dev_priv->irq_lock);
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
if (crtc->cpu_fifo_underrun_disabled) if (crtc->cpu_fifo_underrun_disabled)
...@@ -296,7 +296,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev) ...@@ -296,7 +296,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock); assert_spin_locked(&dev_priv->irq_lock);
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
if (crtc->pch_fifo_underrun_disabled) if (crtc->pch_fifo_underrun_disabled)
...@@ -497,7 +497,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, ...@@ -497,7 +497,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
old = !intel_crtc->cpu_fifo_underrun_disabled; old = !intel_crtc->cpu_fifo_underrun_disabled;
intel_crtc->cpu_fifo_underrun_disabled = !enable; intel_crtc->cpu_fifo_underrun_disabled = !enable;
if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)) if (HAS_GMCH_DISPLAY(dev))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old); i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (IS_GEN5(dev) || IS_GEN6(dev)) else if (IS_GEN5(dev) || IS_GEN6(dev))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable); ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
...@@ -1979,6 +1979,27 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) ...@@ -1979,6 +1979,27 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
res1, res2); res1, res2);
} }
void gen8_flip_interrupt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev_priv->rps.is_bdw_sw_turbo)
return;
if(atomic_read(&dev_priv->rps.sw_turbo.flip_received)) {
mod_timer(&dev_priv->rps.sw_turbo.flip_timer,
usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies);
}
else {
dev_priv->rps.sw_turbo.flip_timer.expires =
usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
add_timer(&dev_priv->rps.sw_turbo.flip_timer);
atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
}
bdw_software_turbo(dev);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their /* The RPS events need forcewake, so we add them to a work queue and mask their
* IMR bits until the work is done. Other interrupts can be processed without * IMR bits until the work is done. Other interrupts can be processed without
* the work queue. */ * the work queue. */
...@@ -2020,7 +2041,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) ...@@ -2020,7 +2041,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
int pipe; int pipe;
spin_lock(&dev_priv->irq_lock); spin_lock(&dev_priv->irq_lock);
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
int reg; int reg;
u32 mask, iir_bit = 0; u32 mask, iir_bit = 0;
...@@ -2065,9 +2086,10 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) ...@@ -2065,9 +2086,10 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
} }
spin_unlock(&dev_priv->irq_lock); spin_unlock(&dev_priv->irq_lock);
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
intel_pipe_handle_vblank(dev, pipe); intel_pipe_handle_vblank(dev, pipe))
intel_check_page_flip(dev, pipe);
if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
intel_prepare_page_flip(dev, pipe); intel_prepare_page_flip(dev, pipe);
...@@ -2234,7 +2256,7 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) ...@@ -2234,7 +2256,7 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
DRM_ERROR("PCH poison interrupt\n"); DRM_ERROR("PCH poison interrupt\n");
if (pch_iir & SDE_FDI_MASK) if (pch_iir & SDE_FDI_MASK)
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe), pipe_name(pipe),
I915_READ(FDI_RX_IIR(pipe))); I915_READ(FDI_RX_IIR(pipe)));
...@@ -2265,7 +2287,7 @@ static void ivb_err_int_handler(struct drm_device *dev) ...@@ -2265,7 +2287,7 @@ static void ivb_err_int_handler(struct drm_device *dev)
if (err_int & ERR_INT_POISON) if (err_int & ERR_INT_POISON)
DRM_ERROR("Poison interrupt\n"); DRM_ERROR("Poison interrupt\n");
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
false)) false))
...@@ -2342,7 +2364,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) ...@@ -2342,7 +2364,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
if (pch_iir & SDE_FDI_MASK_CPT) if (pch_iir & SDE_FDI_MASK_CPT)
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe), pipe_name(pipe),
I915_READ(FDI_RX_IIR(pipe))); I915_READ(FDI_RX_IIR(pipe)));
...@@ -2365,9 +2387,10 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) ...@@ -2365,9 +2387,10 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
if (de_iir & DE_POISON) if (de_iir & DE_POISON)
DRM_ERROR("Poison interrupt\n"); DRM_ERROR("Poison interrupt\n");
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe)) if (de_iir & DE_PIPE_VBLANK(pipe) &&
intel_pipe_handle_vblank(dev, pipe); intel_pipe_handle_vblank(dev, pipe))
intel_check_page_flip(dev, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
...@@ -2415,9 +2438,10 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) ...@@ -2415,9 +2438,10 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
if (de_iir & DE_GSE_IVB) if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(dev); intel_opregion_asle_intr(dev);
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
intel_pipe_handle_vblank(dev, pipe); intel_pipe_handle_vblank(dev, pipe))
intel_check_page_flip(dev, pipe);
/* plane/pipes map 1:1 on ilk+ */ /* plane/pipes map 1:1 on ilk+ */
if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
...@@ -2562,7 +2586,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) ...@@ -2562,7 +2586,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
} }
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
uint32_t pipe_iir; uint32_t pipe_iir;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
...@@ -2572,8 +2596,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) ...@@ -2572,8 +2596,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
if (pipe_iir) { if (pipe_iir) {
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
if (pipe_iir & GEN8_PIPE_VBLANK) if (pipe_iir & GEN8_PIPE_VBLANK &&
intel_pipe_handle_vblank(dev, pipe); intel_pipe_handle_vblank(dev, pipe))
intel_check_page_flip(dev, pipe);
if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
intel_prepare_page_flip(dev, pipe); intel_prepare_page_flip(dev, pipe);
...@@ -2781,7 +2806,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) ...@@ -2781,7 +2806,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
if (eir & I915_ERROR_MEMORY_REFRESH) { if (eir & I915_ERROR_MEMORY_REFRESH) {
pr_err("memory refresh error:\n"); pr_err("memory refresh error:\n");
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
pr_err("pipe %c stat: 0x%08x\n", pr_err("pipe %c stat: 0x%08x\n",
pipe_name(pipe), I915_READ(PIPESTAT(pipe))); pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
/* pipestat has already been acked */ /* pipestat has already been acked */
...@@ -2878,52 +2903,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged, ...@@ -2878,52 +2903,6 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
schedule_work(&dev_priv->gpu_error.work); schedule_work(&dev_priv->gpu_error.work);
} }
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj;
struct intel_unpin_work *work;
unsigned long flags;
bool stall_detected;
/* Ignore early vblank irqs */
if (intel_crtc == NULL)
return;
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL ||
atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
!work->enable_stall_check) {
/* Either the pending flip IRQ arrived, or we're too early. Don't check */
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
i915_gem_obj_ggtt_offset(obj);
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
crtc->y * crtc->primary->fb->pitches[0] +
crtc->x * crtc->primary->fb->bits_per_pixel/8);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
if (stall_detected) {
DRM_DEBUG_DRIVER("Pageflip stall detected\n");
intel_prepare_page_flip(dev, intel_crtc->plane);
}
}
/* Called from drm generic code, passed 'crtc' which /* Called from drm generic code, passed 'crtc' which
* we use as a pipe index * we use as a pipe index
*/ */
...@@ -3459,7 +3438,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev) ...@@ -3459,7 +3438,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff); I915_WRITE(PIPESTAT(pipe), 0xffff);
I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IMR, 0xffffffff); I915_WRITE(VLV_IMR, 0xffffffff);
...@@ -3485,7 +3464,7 @@ static void gen8_irq_reset(struct drm_device *dev) ...@@ -3485,7 +3464,7 @@ static void gen8_irq_reset(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv); gen8_gt_irq_reset(dev_priv);
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
if (intel_display_power_enabled(dev_priv, if (intel_display_power_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe))) POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
...@@ -3528,7 +3507,7 @@ static void cherryview_irq_preinstall(struct drm_device *dev) ...@@ -3528,7 +3507,7 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff); I915_WRITE(PIPESTAT(pipe), 0xffff);
I915_WRITE(VLV_IMR, 0xffffffff); I915_WRITE(VLV_IMR, 0xffffffff);
...@@ -3799,8 +3778,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev) ...@@ -3799,8 +3778,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{ {
int i;
/* These are interrupts we'll toggle with the ring mask register */ /* These are interrupts we'll toggle with the ring mask register */
uint32_t gt_interrupts[] = { uint32_t gt_interrupts[] = {
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
...@@ -3817,15 +3794,15 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) ...@@ -3817,15 +3794,15 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
}; };
for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
dev_priv->pm_irq_mask = 0xffffffff; dev_priv->pm_irq_mask = 0xffffffff;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
} }
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{ {
struct drm_device *dev = dev_priv->dev;
uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE | uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
GEN8_PIPE_CDCLK_CRC_DONE | GEN8_PIPE_CDCLK_CRC_DONE |
GEN8_DE_PIPE_IRQ_FAULT_ERRORS; GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
...@@ -3836,7 +3813,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) ...@@ -3836,7 +3813,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
if (intel_display_power_enabled(dev_priv, if (intel_display_power_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe))) POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
...@@ -3881,12 +3858,12 @@ static int cherryview_irq_postinstall(struct drm_device *dev) ...@@ -3881,12 +3858,12 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
*/ */
dev_priv->irq_mask = ~enable_mask; dev_priv->irq_mask = ~enable_mask;
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff); I915_WRITE(PIPESTAT(pipe), 0xffff);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
i915_enable_pipestat(dev_priv, pipe, pipestat_enable); i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
...@@ -3923,7 +3900,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev) ...@@ -3923,7 +3900,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, 0); I915_WRITE(VLV_MASTER_IER, 0);
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff); I915_WRITE(PIPESTAT(pipe), 0xffff);
I915_WRITE(HWSTAM, 0xffffffff); I915_WRITE(HWSTAM, 0xffffffff);
...@@ -3985,7 +3962,7 @@ do { \ ...@@ -3985,7 +3962,7 @@ do { \
I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff); I915_WRITE(PIPESTAT(pipe), 0xffff);
I915_WRITE(VLV_IMR, 0xffffffff); I915_WRITE(VLV_IMR, 0xffffffff);
...@@ -4009,7 +3986,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev) ...@@ -4009,7 +3986,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int pipe; int pipe;
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE16(IMR, 0xffff); I915_WRITE16(IMR, 0xffff);
I915_WRITE16(IER, 0x0); I915_WRITE16(IER, 0x0);
...@@ -4063,7 +4040,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev, ...@@ -4063,7 +4040,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
return false; return false;
if ((iir & flip_pending) == 0) if ((iir & flip_pending) == 0)
return false; goto check_page_flip;
intel_prepare_page_flip(dev, plane); intel_prepare_page_flip(dev, plane);
...@@ -4074,11 +4051,14 @@ static bool i8xx_handle_vblank(struct drm_device *dev, ...@@ -4074,11 +4051,14 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
* an interrupt per se, we watch for the change at vblank. * an interrupt per se, we watch for the change at vblank.
*/ */
if (I915_READ16(ISR) & flip_pending) if (I915_READ16(ISR) & flip_pending)
return false; goto check_page_flip;
intel_finish_page_flip(dev, pipe); intel_finish_page_flip(dev, pipe);
return true; return true;
check_page_flip:
intel_check_page_flip(dev, pipe);
return false;
} }
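
i8xx_handle_vblank() above (and i915_handle_vblank() further down) now jumps to a check_page_flip label instead of returning false early, so a flip whose completion interrupt was lost still gets inspected on every vblank. The control flow boils down to the pattern below; the helper names are hypothetical stand-ins, not the driver's real functions:

#include <stdbool.h>
#include <stdio.h>

/* Trivial stubs standing in for the driver's flip machinery. */
static bool flip_is_pending(int pipe)    { (void)pipe; return true;  }
static bool flip_still_in_isr(int pipe)  { (void)pipe; return false; }
static void prepare_flip(int pipe)       { printf("prepare pipe %d\n", pipe); }
static void finish_flip(int pipe)        { printf("finish pipe %d\n", pipe);  }
static void check_stalled_flip(int pipe) { printf("check pipe %d\n", pipe);   }

/* Same shape as the reworked vblank handlers: never bail out without
 * giving the stall check a chance to run. */
static bool handle_vblank(int pipe)
{
        if (!flip_is_pending(pipe))
                goto check_page_flip;

        prepare_flip(pipe);

        /* Flip-done may race with vblank: if the hardware still latches the
         * flip as pending, finish it on a later vblank instead. */
        if (flip_still_in_isr(pipe))
                goto check_page_flip;

        finish_flip(pipe);
        return true;

check_page_flip:
        check_stalled_flip(pipe);
        return false;
}

int main(void)
{
        return handle_vblank(0) ? 0 : 1;
}
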
static irqreturn_t i8xx_irq_handler(int irq, void *arg) static irqreturn_t i8xx_irq_handler(int irq, void *arg)
...@@ -4109,7 +4089,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) ...@@ -4109,7 +4089,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
"Command parser error, iir 0x%08x", "Command parser error, iir 0x%08x",
iir); iir);
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe); int reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg); pipe_stats[pipe] = I915_READ(reg);
...@@ -4129,7 +4109,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) ...@@ -4129,7 +4109,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (iir & I915_USER_INTERRUPT) if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]); notify_ring(dev, &dev_priv->ring[RCS]);
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
int plane = pipe; int plane = pipe;
if (HAS_FBC(dev)) if (HAS_FBC(dev))
plane = !plane; plane = !plane;
...@@ -4157,7 +4137,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev) ...@@ -4157,7 +4137,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int pipe; int pipe;
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
/* Clear enable bits; then clear status bits */ /* Clear enable bits; then clear status bits */
I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
...@@ -4178,7 +4158,7 @@ static void i915_irq_preinstall(struct drm_device * dev) ...@@ -4178,7 +4158,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
} }
I915_WRITE16(HWSTAM, 0xeffe); I915_WRITE16(HWSTAM, 0xeffe);
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff); I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0); I915_WRITE(IER, 0x0);
...@@ -4248,7 +4228,7 @@ static bool i915_handle_vblank(struct drm_device *dev, ...@@ -4248,7 +4228,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
return false; return false;
if ((iir & flip_pending) == 0) if ((iir & flip_pending) == 0)
return false; goto check_page_flip;
intel_prepare_page_flip(dev, plane); intel_prepare_page_flip(dev, plane);
...@@ -4259,11 +4239,14 @@ static bool i915_handle_vblank(struct drm_device *dev, ...@@ -4259,11 +4239,14 @@ static bool i915_handle_vblank(struct drm_device *dev,
* an interrupt per se, we watch for the change at vblank. * an interrupt per se, we watch for the change at vblank.
*/ */
if (I915_READ(ISR) & flip_pending) if (I915_READ(ISR) & flip_pending)
return false; goto check_page_flip;
intel_finish_page_flip(dev, pipe); intel_finish_page_flip(dev, pipe);
return true; return true;
check_page_flip:
intel_check_page_flip(dev, pipe);
return false;
} }
static irqreturn_t i915_irq_handler(int irq, void *arg) static irqreturn_t i915_irq_handler(int irq, void *arg)
...@@ -4293,7 +4276,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) ...@@ -4293,7 +4276,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
"Command parser error, iir 0x%08x", "Command parser error, iir 0x%08x",
iir); iir);
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe); int reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg); pipe_stats[pipe] = I915_READ(reg);
...@@ -4319,7 +4302,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) ...@@ -4319,7 +4302,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (iir & I915_USER_INTERRUPT) if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]); notify_ring(dev, &dev_priv->ring[RCS]);
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
int plane = pipe; int plane = pipe;
if (HAS_FBC(dev)) if (HAS_FBC(dev))
plane = !plane; plane = !plane;
...@@ -4377,7 +4360,7 @@ static void i915_irq_uninstall(struct drm_device * dev) ...@@ -4377,7 +4360,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
} }
I915_WRITE16(HWSTAM, 0xffff); I915_WRITE16(HWSTAM, 0xffff);
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
/* Clear enable bits; then clear status bits */ /* Clear enable bits; then clear status bits */
I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
...@@ -4397,7 +4380,7 @@ static void i965_irq_preinstall(struct drm_device * dev) ...@@ -4397,7 +4380,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
I915_WRITE(HWSTAM, 0xeffe); I915_WRITE(HWSTAM, 0xeffe);
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff); I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0); I915_WRITE(IER, 0x0);
...@@ -4522,7 +4505,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) ...@@ -4522,7 +4505,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
"Command parser error, iir 0x%08x", "Command parser error, iir 0x%08x",
iir); iir);
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
int reg = PIPESTAT(pipe); int reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg); pipe_stats[pipe] = I915_READ(reg);
...@@ -4553,7 +4536,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) ...@@ -4553,7 +4536,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_BSD_USER_INTERRUPT) if (iir & I915_BSD_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]); notify_ring(dev, &dev_priv->ring[VCS]);
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
i915_handle_vblank(dev, pipe, pipe, iir)) i915_handle_vblank(dev, pipe, pipe, iir))
flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
...@@ -4610,12 +4593,12 @@ static void i965_irq_uninstall(struct drm_device * dev) ...@@ -4610,12 +4593,12 @@ static void i965_irq_uninstall(struct drm_device * dev)
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
I915_WRITE(HWSTAM, 0xffffffff); I915_WRITE(HWSTAM, 0xffffffff);
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0); I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff); I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0); I915_WRITE(IER, 0x0);
for_each_pipe(pipe) for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), I915_WRITE(PIPESTAT(pipe),
I915_READ(PIPESTAT(pipe)) & 0x8000ffff); I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
I915_WRITE(IIR, I915_READ(IIR)); I915_WRITE(IIR, I915_READ(IIR));
...@@ -4673,8 +4656,8 @@ void intel_irq_init(struct drm_device *dev) ...@@ -4673,8 +4656,8 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
/* Let's track the enabled rps events */ /* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev)) if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
/* WaGsvRC0ResidenncyMethod:VLV */ /* WaGsvRC0ResidencyMethod:vlv */
dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
......
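
Every for_each_pipe() caller in this file now passes dev_priv, which lets the iterator bound itself by the pipe count of the device at hand rather than a bare constant. The macro's definition is not part of this hunk, so the following is only a plausible shape with assumed field names:

#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, I915_MAX_PIPES };

struct device_info { int num_pipes; };
struct drm_i915_private { struct device_info info; };

/* Assumed shape: iterate only over pipes that exist on this device. */
#define for_each_pipe(dev_priv, p) \
        for ((p) = PIPE_A; (p) < (dev_priv)->info.num_pipes; (p)++)

int main(void)
{
        struct drm_i915_private dev_priv = { .info = { .num_pipes = 2 } };
        enum pipe pipe;

        for_each_pipe(&dev_priv, pipe)
                printf("pipe %c\n", 'A' + pipe);
        return 0;
}
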
...@@ -1030,6 +1030,13 @@ enum punit_power_well { ...@@ -1030,6 +1030,13 @@ enum punit_power_well {
#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */ #define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */ #define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
#define PGTBL_ER 0x02024 #define PGTBL_ER 0x02024
#define PRB0_BASE (0x2030-0x30)
#define PRB1_BASE (0x2040-0x30) /* 830,gen3 */
#define PRB2_BASE (0x2050-0x30) /* gen3 */
#define SRB0_BASE (0x2100-0x30) /* gen2 */
#define SRB1_BASE (0x2110-0x30) /* gen2 */
#define SRB2_BASE (0x2120-0x30) /* 830 */
#define SRB3_BASE (0x2130-0x30) /* 830 */
#define RENDER_RING_BASE 0x02000 #define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000 #define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000 #define GEN6_BSD_RING_BASE 0x12000
...@@ -1276,6 +1283,10 @@ enum punit_power_well { ...@@ -1276,6 +1283,10 @@ enum punit_power_well {
#define INSTPM_TLB_INVALIDATE (1<<9) #define INSTPM_TLB_INVALIDATE (1<<9)
#define INSTPM_SYNC_FLUSH (1<<5) #define INSTPM_SYNC_FLUSH (1<<5)
#define ACTHD 0x020c8 #define ACTHD 0x020c8
#define MEM_MODE 0x020cc
#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1<<3) /* 830 only */
#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1<<2) /* 830/845 only */
#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1<<2) /* 85x only */
#define FW_BLC 0x020d8 #define FW_BLC 0x020d8
#define FW_BLC2 0x020dc #define FW_BLC2 0x020dc
#define FW_BLC_SELF 0x020e0 /* 915+ only */ #define FW_BLC_SELF 0x020e0 /* 915+ only */
...@@ -4218,6 +4229,7 @@ enum punit_power_well { ...@@ -4218,6 +4229,7 @@ enum punit_power_well {
#define DISPPLANE_NO_LINE_DOUBLE 0 #define DISPPLANE_NO_LINE_DOUBLE 0
#define DISPPLANE_STEREO_POLARITY_FIRST 0 #define DISPPLANE_STEREO_POLARITY_FIRST 0
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_ROTATE_180 (1<<15)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ #define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10) #define DISPPLANE_TILED (1<<10)
#define _DSPAADDR 0x70184 #define _DSPAADDR 0x70184
...@@ -5356,8 +5368,7 @@ enum punit_power_well { ...@@ -5356,8 +5368,7 @@ enum punit_power_well {
#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) #define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) #define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) #define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
#define PANEL_PORT_SELECT_DPB_VLV (1 << 30) #define PANEL_PORT_SELECT_VLV(port) ((port) << 30)
#define PANEL_PORT_SELECT_DPC_VLV (2 << 30)
#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) #define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) #define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
...@@ -5566,6 +5577,10 @@ enum punit_power_well { ...@@ -5566,6 +5577,10 @@ enum punit_power_well {
#define GEN8_UCGCTL6 0x9430 #define GEN8_UCGCTL6 0x9430
#define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14) #define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14)
#define TIMESTAMP_CTR 0x44070
#define FREQ_1_28_US(us) (((us) * 100) >> 7)
#define MCHBAR_PCU_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5960)
#define GEN6_GFXPAUSE 0xA000 #define GEN6_GFXPAUSE 0xA000
#define GEN6_RPNSWREQ 0xA008 #define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31) #define GEN6_TURBO_DISABLE (1<<31)
...@@ -5654,12 +5669,6 @@ enum punit_power_well { ...@@ -5654,12 +5669,6 @@ enum punit_power_well {
GEN6_PM_RP_DOWN_THRESHOLD | \ GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT) GEN6_PM_RP_DOWN_TIMEOUT)
#define CHV_CZ_CLOCK_FREQ_MODE_200 200
#define CHV_CZ_CLOCK_FREQ_MODE_267 267
#define CHV_CZ_CLOCK_FREQ_MODE_320 320
#define CHV_CZ_CLOCK_FREQ_MODE_333 333
#define CHV_CZ_CLOCK_FREQ_MODE_400 400
#define GEN7_GT_SCRATCH_BASE 0x4F100 #define GEN7_GT_SCRATCH_BASE 0x4F100
#define GEN7_GT_SCRATCH_REG_NUM 8 #define GEN7_GT_SCRATCH_REG_NUM 8
...@@ -5975,15 +5984,7 @@ enum punit_power_well { ...@@ -5975,15 +5984,7 @@ enum punit_power_well {
#define DDI_BUF_CTL_B 0x64100 #define DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B) #define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1<<31) #define DDI_BUF_CTL_ENABLE (1<<31)
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */ #define DDI_BUF_TRANS_SELECT(n) ((n) << 24)
#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24) #define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_PORT_REVERSAL (1<<16) #define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7) #define DDI_BUF_IS_IDLE (1<<7)
......
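
The register header trades hand-enumerated values for parameterised macros: DDI_BUF_TRANS_SELECT(n) replaces the nine DDI_BUF_EMP_* constants and PANEL_PORT_SELECT_VLV(port) replaces the per-port PANEL_PORT_SELECT_DPB/DPC_VLV pair, while the new FREQ_1_28_US(us) converts microseconds into 1.28 us timestamp-counter ticks via us * 100 / 128. A compact demonstration (macro bodies as in the diff; the printed values are illustrative):

#include <stdio.h>
#include <stdint.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };

/* One macro instead of nine DDI_BUF_EMP_* constants: select entry n of the
 * DDI buffer translation table programmed earlier. */
#define DDI_BUF_TRANS_SELECT(n)     ((uint32_t)(n) << 24)

/* One macro instead of a define per port (VLV panel power sequencer). */
#define PANEL_PORT_SELECT_VLV(port) ((uint32_t)(port) << 30)

/* Microseconds -> 1.28 us timestamp-counter ticks: us * 100 / 128. */
#define FREQ_1_28_US(us)            (((us) * 100) >> 7)

int main(void)
{
        /* Entry 0 matches the old DDI_BUF_EMP_400MV_0DB_HSW encoding. */
        printf("sel0   = 0x%08x\n", (unsigned)DDI_BUF_TRANS_SELECT(0));
        printf("portB  = 0x%08x\n", (unsigned)PANEL_PORT_SELECT_VLV(PORT_B));
        printf("1280us = %lu ticks\n", (unsigned long)FREQ_1_28_US(1280UL));
        return 0;
}
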
...@@ -540,7 +540,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, ...@@ -540,7 +540,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
memset(&error_priv, 0, sizeof(error_priv)); memset(&error_priv, 0, sizeof(error_priv));
ret = i915_error_state_buf_init(&error_str, count, off); ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
if (ret) if (ret)
return ret; return ret;
......
...@@ -627,16 +627,16 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) ...@@ -627,16 +627,16 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
switch (edp_link_params->preemphasis) { switch (edp_link_params->preemphasis) {
case EDP_PREEMPHASIS_NONE: case EDP_PREEMPHASIS_NONE:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0; dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
break; break;
case EDP_PREEMPHASIS_3_5dB: case EDP_PREEMPHASIS_3_5dB:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
break; break;
case EDP_PREEMPHASIS_6dB: case EDP_PREEMPHASIS_6dB:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6; dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
break; break;
case EDP_PREEMPHASIS_9_5dB: case EDP_PREEMPHASIS_9_5dB:
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
break; break;
default: default:
DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n", DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
...@@ -646,16 +646,16 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) ...@@ -646,16 +646,16 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
switch (edp_link_params->vswing) { switch (edp_link_params->vswing) {
case EDP_VSWING_0_4V: case EDP_VSWING_0_4V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400; dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
break; break;
case EDP_VSWING_0_6V: case EDP_VSWING_0_6V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600; dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
break; break;
case EDP_VSWING_0_8V: case EDP_VSWING_0_8V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800; dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
break; break;
case EDP_VSWING_1_2V: case EDP_VSWING_1_2V:
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200; dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
break; break;
default: default:
DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n", DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
......
...@@ -28,98 +28,103 @@ ...@@ -28,98 +28,103 @@
#include "i915_drv.h" #include "i915_drv.h"
#include "intel_drv.h" #include "intel_drv.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
u32 trans2; /* vref sel, vswing */
};
/* HDMI/DVI modes ignore everything but the last 2 items. So we share /* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to * them for both DP and FDI transports, allowing those ports to
* automatically adapt to HDMI connections as well * automatically adapt to HDMI connections as well
*/ */
static const u32 hsw_ddi_translations_dp[] = { static const struct ddi_buf_trans hsw_ddi_translations_dp[] = {
0x00FFFFFF, 0x0006000E, { 0x00FFFFFF, 0x0006000E },
0x00D75FFF, 0x0005000A, { 0x00D75FFF, 0x0005000A },
0x00C30FFF, 0x00040006, { 0x00C30FFF, 0x00040006 },
0x80AAAFFF, 0x000B0000, { 0x80AAAFFF, 0x000B0000 },
0x00FFFFFF, 0x0005000A, { 0x00FFFFFF, 0x0005000A },
0x00D75FFF, 0x000C0004, { 0x00D75FFF, 0x000C0004 },
0x80C30FFF, 0x000B0000, { 0x80C30FFF, 0x000B0000 },
0x00FFFFFF, 0x00040006, { 0x00FFFFFF, 0x00040006 },
0x80D75FFF, 0x000B0000, { 0x80D75FFF, 0x000B0000 },
}; };
static const u32 hsw_ddi_translations_fdi[] = { static const struct ddi_buf_trans hsw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x0007000E, { 0x00FFFFFF, 0x0007000E },
0x00D75FFF, 0x000F000A, { 0x00D75FFF, 0x000F000A },
0x00C30FFF, 0x00060006, { 0x00C30FFF, 0x00060006 },
0x00AAAFFF, 0x001E0000, { 0x00AAAFFF, 0x001E0000 },
0x00FFFFFF, 0x000F000A, { 0x00FFFFFF, 0x000F000A },
0x00D75FFF, 0x00160004, { 0x00D75FFF, 0x00160004 },
0x00C30FFF, 0x001E0000, { 0x00C30FFF, 0x001E0000 },
0x00FFFFFF, 0x00060006, { 0x00FFFFFF, 0x00060006 },
0x00D75FFF, 0x001E0000, { 0x00D75FFF, 0x001E0000 },
}; };
static const u32 hsw_ddi_translations_hdmi[] = { static const struct ddi_buf_trans hsw_ddi_translations_hdmi[] = {
/* Idx NT mV diff T mV diff db */ /* Idx NT mV d T mV d db */
0x00FFFFFF, 0x0006000E, /* 0: 400 400 0 */ { 0x00FFFFFF, 0x0006000E }, /* 0: 400 400 0 */
0x00E79FFF, 0x000E000C, /* 1: 400 500 2 */ { 0x00E79FFF, 0x000E000C }, /* 1: 400 500 2 */
0x00D75FFF, 0x0005000A, /* 2: 400 600 3.5 */ { 0x00D75FFF, 0x0005000A }, /* 2: 400 600 3.5 */
0x00FFFFFF, 0x0005000A, /* 3: 600 600 0 */ { 0x00FFFFFF, 0x0005000A }, /* 3: 600 600 0 */
0x00E79FFF, 0x001D0007, /* 4: 600 750 2 */ { 0x00E79FFF, 0x001D0007 }, /* 4: 600 750 2 */
0x00D75FFF, 0x000C0004, /* 5: 600 900 3.5 */ { 0x00D75FFF, 0x000C0004 }, /* 5: 600 900 3.5 */
0x00FFFFFF, 0x00040006, /* 6: 800 800 0 */ { 0x00FFFFFF, 0x00040006 }, /* 6: 800 800 0 */
0x80E79FFF, 0x00030002, /* 7: 800 1000 2 */ { 0x80E79FFF, 0x00030002 }, /* 7: 800 1000 2 */
0x00FFFFFF, 0x00140005, /* 8: 850 850 0 */ { 0x00FFFFFF, 0x00140005 }, /* 8: 850 850 0 */
0x00FFFFFF, 0x000C0004, /* 9: 900 900 0 */ { 0x00FFFFFF, 0x000C0004 }, /* 9: 900 900 0 */
0x00FFFFFF, 0x001C0003, /* 10: 950 950 0 */ { 0x00FFFFFF, 0x001C0003 }, /* 10: 950 950 0 */
0x80FFFFFF, 0x00030002, /* 11: 1000 1000 0 */ { 0x80FFFFFF, 0x00030002 }, /* 11: 1000 1000 0 */
}; };
static const u32 bdw_ddi_translations_edp[] = { static const struct ddi_buf_trans bdw_ddi_translations_edp[] = {
0x00FFFFFF, 0x00000012, { 0x00FFFFFF, 0x00000012 },
0x00EBAFFF, 0x00020011, { 0x00EBAFFF, 0x00020011 },
0x00C71FFF, 0x0006000F, { 0x00C71FFF, 0x0006000F },
0x00AAAFFF, 0x000E000A, { 0x00AAAFFF, 0x000E000A },
0x00FFFFFF, 0x00020011, { 0x00FFFFFF, 0x00020011 },
0x00DB6FFF, 0x0005000F, { 0x00DB6FFF, 0x0005000F },
0x00BEEFFF, 0x000A000C, { 0x00BEEFFF, 0x000A000C },
0x00FFFFFF, 0x0005000F, { 0x00FFFFFF, 0x0005000F },
0x00DB6FFF, 0x000A000C, { 0x00DB6FFF, 0x000A000C },
}; };
static const u32 bdw_ddi_translations_dp[] = { static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
0x00FFFFFF, 0x0007000E, { 0x00FFFFFF, 0x0007000E },
0x00D75FFF, 0x000E000A, { 0x00D75FFF, 0x000E000A },
0x00BEFFFF, 0x00140006, { 0x00BEFFFF, 0x00140006 },
0x80B2CFFF, 0x001B0002, { 0x80B2CFFF, 0x001B0002 },
0x00FFFFFF, 0x000E000A, { 0x00FFFFFF, 0x000E000A },
0x00D75FFF, 0x00180004, { 0x00D75FFF, 0x00180004 },
0x80CB2FFF, 0x001B0002, { 0x80CB2FFF, 0x001B0002 },
0x00F7DFFF, 0x00180004, { 0x00F7DFFF, 0x00180004 },
0x80D75FFF, 0x001B0002, { 0x80D75FFF, 0x001B0002 },
}; };
static const u32 bdw_ddi_translations_fdi[] = { static const struct ddi_buf_trans bdw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x0001000E, { 0x00FFFFFF, 0x0001000E },
0x00D75FFF, 0x0004000A, { 0x00D75FFF, 0x0004000A },
0x00C30FFF, 0x00070006, { 0x00C30FFF, 0x00070006 },
0x00AAAFFF, 0x000C0000, { 0x00AAAFFF, 0x000C0000 },
0x00FFFFFF, 0x0004000A, { 0x00FFFFFF, 0x0004000A },
0x00D75FFF, 0x00090004, { 0x00D75FFF, 0x00090004 },
0x00C30FFF, 0x000C0000, { 0x00C30FFF, 0x000C0000 },
0x00FFFFFF, 0x00070006, { 0x00FFFFFF, 0x00070006 },
0x00D75FFF, 0x000C0000, { 0x00D75FFF, 0x000C0000 },
}; };
static const u32 bdw_ddi_translations_hdmi[] = { static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
/* Idx NT mV diff T mV diff db */ /* Idx NT mV d T mV df db */
0x00FFFFFF, 0x0007000E, /* 0: 400 400 0 */ { 0x00FFFFFF, 0x0007000E }, /* 0: 400 400 0 */
0x00D75FFF, 0x000E000A, /* 1: 400 600 3.5 */ { 0x00D75FFF, 0x000E000A }, /* 1: 400 600 3.5 */
0x00BEFFFF, 0x00140006, /* 2: 400 800 6 */ { 0x00BEFFFF, 0x00140006 }, /* 2: 400 800 6 */
0x00FFFFFF, 0x0009000D, /* 3: 450 450 0 */ { 0x00FFFFFF, 0x0009000D }, /* 3: 450 450 0 */
0x00FFFFFF, 0x000E000A, /* 4: 600 600 0 */ { 0x00FFFFFF, 0x000E000A }, /* 4: 600 600 0 */
0x00D7FFFF, 0x00140006, /* 5: 600 800 2.5 */ { 0x00D7FFFF, 0x00140006 }, /* 5: 600 800 2.5 */
0x80CB2FFF, 0x001B0002, /* 6: 600 1000 4.5 */ { 0x80CB2FFF, 0x001B0002 }, /* 6: 600 1000 4.5 */
0x00FFFFFF, 0x00140006, /* 7: 800 800 0 */ { 0x00FFFFFF, 0x00140006 }, /* 7: 800 800 0 */
0x80E79FFF, 0x001B0002, /* 8: 800 1000 2 */ { 0x80E79FFF, 0x001B0002 }, /* 8: 800 1000 2 */
0x80FFFFFF, 0x001B0002, /* 9: 1000 1000 0 */ { 0x80FFFFFF, 0x001B0002 }, /* 9: 1000 1000 0 */
}; };
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
...@@ -158,25 +163,25 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) ...@@ -158,25 +163,25 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
u32 reg; u32 reg;
int i, n_hdmi_entries, hdmi_800mV_0dB; int i, n_hdmi_entries, hdmi_800mV_0dB;
int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
const u32 *ddi_translations_fdi; const struct ddi_buf_trans *ddi_translations_fdi;
const u32 *ddi_translations_dp; const struct ddi_buf_trans *ddi_translations_dp;
const u32 *ddi_translations_edp; const struct ddi_buf_trans *ddi_translations_edp;
const u32 *ddi_translations_hdmi; const struct ddi_buf_trans *ddi_translations_hdmi;
const u32 *ddi_translations; const struct ddi_buf_trans *ddi_translations;
if (IS_BROADWELL(dev)) { if (IS_BROADWELL(dev)) {
ddi_translations_fdi = bdw_ddi_translations_fdi; ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp; ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp; ddi_translations_edp = bdw_ddi_translations_edp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi; ddi_translations_hdmi = bdw_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi) / 2; n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_800mV_0dB = 7; hdmi_800mV_0dB = 7;
} else if (IS_HASWELL(dev)) { } else if (IS_HASWELL(dev)) {
ddi_translations_fdi = hsw_ddi_translations_fdi; ddi_translations_fdi = hsw_ddi_translations_fdi;
ddi_translations_dp = hsw_ddi_translations_dp; ddi_translations_dp = hsw_ddi_translations_dp;
ddi_translations_edp = hsw_ddi_translations_dp; ddi_translations_edp = hsw_ddi_translations_dp;
ddi_translations_hdmi = hsw_ddi_translations_hdmi; ddi_translations_hdmi = hsw_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi) / 2; n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi);
hdmi_800mV_0dB = 6; hdmi_800mV_0dB = 6;
} else { } else {
WARN(1, "ddi translation table missing\n"); WARN(1, "ddi translation table missing\n");
...@@ -184,7 +189,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) ...@@ -184,7 +189,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
ddi_translations_fdi = bdw_ddi_translations_fdi; ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp; ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_hdmi = bdw_ddi_translations_hdmi; ddi_translations_hdmi = bdw_ddi_translations_hdmi;
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi) / 2; n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_800mV_0dB = 7; hdmi_800mV_0dB = 7;
} }
...@@ -211,7 +216,9 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) ...@@ -211,7 +216,9 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
for (i = 0, reg = DDI_BUF_TRANS(port); for (i = 0, reg = DDI_BUF_TRANS(port);
i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
I915_WRITE(reg, ddi_translations[i]); I915_WRITE(reg, ddi_translations[i].trans1);
reg += 4;
I915_WRITE(reg, ddi_translations[i].trans2);
reg += 4; reg += 4;
} }
...@@ -221,10 +228,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) ...@@ -221,10 +228,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
hdmi_level = hdmi_800mV_0dB; hdmi_level = hdmi_800mV_0dB;
/* Entry 9 is for HDMI: */ /* Entry 9 is for HDMI: */
for (i = 0; i < 2; i++) { I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1);
I915_WRITE(reg, ddi_translations_hdmi[hdmi_level * 2 + i]); reg += 4;
reg += 4; I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans2);
} reg += 4;
} }
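
Switching the DDI translation tables from flat u32 arrays to struct ddi_buf_trans pairs means ARRAY_SIZE() now counts entries directly (the old code divided by 2) and each entry is written out as .trans1 followed by .trans2. A reduced sketch of the new indexing, using a toy write_reg() in place of I915_WRITE() and an illustrative register offset:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct ddi_buf_trans {
        uint32_t trans1; /* balance leg enable, de-emph level */
        uint32_t trans2; /* vref sel, vswing */
};

static const struct ddi_buf_trans demo_translations[] = {
        { 0x00FFFFFF, 0x0006000E },
        { 0x00D75FFF, 0x0005000A },
};

static void write_reg(uint32_t reg, uint32_t val)
{
        printf("reg 0x%05x <- 0x%08x\n", (unsigned)reg, (unsigned)val);
}

int main(void)
{
        uint32_t reg = 0x64e00; /* DDI buffer translation base, for illustration */
        size_t i;

        /* ARRAY_SIZE() is the entry count; the flat u32 tables needed
         * ARRAY_SIZE(...) / 2 for the same information. */
        for (i = 0; i < ARRAY_SIZE(demo_translations); i++) {
                write_reg(reg, demo_translations[i].trans1);
                reg += 4;
                write_reg(reg, demo_translations[i].trans2);
                reg += 4;
        }
        return 0;
}
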
/* Program DDI buffers translations for DP. By default, program ports A-D in DP /* Program DDI buffers translations for DP. By default, program ports A-D in DP
...@@ -241,18 +248,6 @@ void intel_prepare_ddi(struct drm_device *dev) ...@@ -241,18 +248,6 @@ void intel_prepare_ddi(struct drm_device *dev)
intel_prepare_ddi_buffers(dev, port); intel_prepare_ddi_buffers(dev, port);
} }
static const long hsw_ddi_buf_ctl_values[] = {
DDI_BUF_EMP_400MV_0DB_HSW,
DDI_BUF_EMP_400MV_3_5DB_HSW,
DDI_BUF_EMP_400MV_6DB_HSW,
DDI_BUF_EMP_400MV_9_5DB_HSW,
DDI_BUF_EMP_600MV_0DB_HSW,
DDI_BUF_EMP_600MV_3_5DB_HSW,
DDI_BUF_EMP_600MV_6DB_HSW,
DDI_BUF_EMP_800MV_0DB_HSW,
DDI_BUF_EMP_800MV_3_5DB_HSW
};
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port) enum port port)
{ {
...@@ -312,7 +307,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) ...@@ -312,7 +307,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
/* Start the training iterating through available voltages and emphasis, /* Start the training iterating through available voltages and emphasis,
* testing each value twice. */ * testing each value twice. */
for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) { for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
/* Configure DP_TP_CTL with auto-training */ /* Configure DP_TP_CTL with auto-training */
I915_WRITE(DP_TP_CTL(PORT_E), I915_WRITE(DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_FDI_AUTOTRAIN |
...@@ -327,7 +322,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) ...@@ -327,7 +322,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(DDI_BUF_CTL(PORT_E), I915_WRITE(DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE | DDI_BUF_CTL_ENABLE |
((intel_crtc->config.fdi_lanes - 1) << 1) | ((intel_crtc->config.fdi_lanes - 1) << 1) |
hsw_ddi_buf_ctl_values[i / 2]); DDI_BUF_TRANS_SELECT(i / 2));
POSTING_READ(DDI_BUF_CTL(PORT_E)); POSTING_READ(DDI_BUF_CTL(PORT_E));
udelay(600); udelay(600);
...@@ -402,7 +397,7 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder) ...@@ -402,7 +397,7 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
enc_to_dig_port(&encoder->base); enc_to_dig_port(&encoder->base);
intel_dp->DP = intel_dig_port->saved_port_bits | intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count); intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
} }
...@@ -429,7 +424,7 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) ...@@ -429,7 +424,7 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
} }
#define LC_FREQ 2700 #define LC_FREQ 2700
#define LC_FREQ_2K (LC_FREQ * 2000) #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
#define P_MIN 2 #define P_MIN 2
#define P_MAX 64 #define P_MAX 64
...@@ -441,7 +436,11 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) ...@@ -441,7 +436,11 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
#define VCO_MIN 2400 #define VCO_MIN 2400
#define VCO_MAX 4800 #define VCO_MAX 4800
#define ABS_DIFF(a, b) ((a > b) ? (a - b) : (b - a)) #define abs_diff(a, b) ({ \
typeof(a) __a = (a); \
typeof(b) __b = (b); \
(void) (&__a == &__b); \
__a > __b ? (__a - __b) : (__b - __a); })
struct wrpll_rnp { struct wrpll_rnp {
unsigned p, n2, r2; unsigned p, n2, r2;
...@@ -551,9 +550,9 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget, ...@@ -551,9 +550,9 @@ static void wrpll_update_rnp(uint64_t freq2k, unsigned budget,
*/ */
a = freq2k * budget * p * r2; a = freq2k * budget * p * r2;
b = freq2k * budget * best->p * best->r2; b = freq2k * budget * best->p * best->r2;
diff = ABS_DIFF((freq2k * p * r2), (LC_FREQ_2K * n2)); diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
diff_best = ABS_DIFF((freq2k * best->p * best->r2), diff_best = abs_diff(freq2k * best->p * best->r2,
(LC_FREQ_2K * best->n2)); LC_FREQ_2K * best->n2);
c = 1000000 * diff; c = 1000000 * diff;
d = 1000000 * diff_best; d = 1000000 * diff_best;
......
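
The abs_diff() macro that replaces ABS_DIFF() evaluates each argument exactly once and uses the (void)(&__a == &__b) pointer comparison to make the compiler warn when the two arguments have mismatched types, the same trick the kernel's min()/max() use. It relies on GNU C statement expressions and typeof, so the standalone demonstration below assumes GCC or Clang:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the kernel macro: single evaluation of each argument plus a
 * compile-time type-compatibility check via the pointer comparison. */
#define abs_diff(a, b) ({                       \
        typeof(a) __a = (a);                    \
        typeof(b) __b = (b);                    \
        (void)(&__a == &__b);                   \
        __a > __b ? (__a - __b) : (__b - __a); })

int main(void)
{
        uint64_t freq2k = 540000000ULL; /* arbitrary example values */
        uint64_t target = 539000000ULL;

        /* Arguments are evaluated once, so abs_diff(x++, y) would not
         * double-increment x the way the old ABS_DIFF() did. */
        printf("diff = %llu\n",
               (unsigned long long)abs_diff(freq2k, target));
        return 0;
}
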
This diff is collapsed.
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#ifndef __INTEL_DRV_H__ #ifndef __INTEL_DRV_H__
#define __INTEL_DRV_H__ #define __INTEL_DRV_H__
#include <linux/async.h>
#include <linux/i2c.h> #include <linux/i2c.h>
#include <linux/hdmi.h> #include <linux/hdmi.h>
#include <drm/i915_drm.h> #include <drm/i915_drm.h>
...@@ -179,6 +180,8 @@ struct intel_panel { ...@@ -179,6 +180,8 @@ struct intel_panel {
bool active_low_pwm; bool active_low_pwm;
struct backlight_device *device; struct backlight_device *device;
} backlight; } backlight;
void (*backlight_power)(struct intel_connector *, bool enable);
}; };
struct intel_connector { struct intel_connector {
...@@ -211,6 +214,7 @@ struct intel_connector { ...@@ -211,6 +214,7 @@ struct intel_connector {
/* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */ /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
struct edid *edid; struct edid *edid;
struct edid *detect_edid;
/* since POLL and HPD connectors may use the same HPD line keep the native /* since POLL and HPD connectors may use the same HPD line keep the native
state of connector->polled in case hotplug storm detection changes it */ state of connector->polled in case hotplug storm detection changes it */
...@@ -566,6 +570,12 @@ struct intel_dp { ...@@ -566,6 +570,12 @@ struct intel_dp {
struct notifier_block edp_notifier; struct notifier_block edp_notifier;
/*
* Pipe whose power sequencer is currently locked into
* this port. Only relevant on VLV/CHV.
*/
enum pipe pps_pipe;
bool use_tps3; bool use_tps3;
bool can_mst; /* this port supports mst */ bool can_mst; /* this port supports mst */
bool is_mst; bool is_mst;
...@@ -664,6 +674,10 @@ struct intel_unpin_work { ...@@ -664,6 +674,10 @@ struct intel_unpin_work {
#define INTEL_FLIP_COMPLETE 2 #define INTEL_FLIP_COMPLETE 2
u32 flip_count; u32 flip_count;
u32 gtt_offset; u32 gtt_offset;
struct intel_engine_cs *flip_queued_ring;
u32 flip_queued_seqno;
int flip_queued_vblank;
int flip_ready_vblank;
bool enable_stall_check; bool enable_stall_check;
}; };
...@@ -828,7 +842,6 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, ...@@ -828,7 +842,6 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe); enum pipe pipe);
void intel_wait_for_vblank(struct drm_device *dev, int pipe); void intel_wait_for_vblank(struct drm_device *dev, int pipe);
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport); struct intel_digital_port *dport);
...@@ -849,6 +862,7 @@ __intel_framebuffer_create(struct drm_device *dev, ...@@ -849,6 +862,7 @@ __intel_framebuffer_create(struct drm_device *dev,
void intel_prepare_page_flip(struct drm_device *dev, int plane); void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe); void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane); void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
void intel_check_page_flip(struct drm_device *dev, int pipe);
/* shared dpll functions */ /* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc); struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
...@@ -937,6 +951,7 @@ void intel_dp_mst_suspend(struct drm_device *dev); ...@@ -937,6 +951,7 @@ void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev); void intel_dp_mst_resume(struct drm_device *dev);
int intel_dp_max_link_bw(struct intel_dp *intel_dp); int intel_dp_max_link_bw(struct intel_dp *intel_dp);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder); void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
/* intel_dp_mst.c */ /* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
...@@ -951,7 +966,7 @@ void intel_dvo_init(struct drm_device *dev); ...@@ -951,7 +966,7 @@ void intel_dvo_init(struct drm_device *dev);
/* legacy fbdev emulation in intel_fbdev.c */ /* legacy fbdev emulation in intel_fbdev.c */
#ifdef CONFIG_DRM_I915_FBDEV #ifdef CONFIG_DRM_I915_FBDEV
extern int intel_fbdev_init(struct drm_device *dev); extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(struct drm_device *dev); extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
extern void intel_fbdev_fini(struct drm_device *dev); extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous); extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
extern void intel_fbdev_output_poll_changed(struct drm_device *dev); extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
...@@ -962,7 +977,7 @@ static inline int intel_fbdev_init(struct drm_device *dev) ...@@ -962,7 +977,7 @@ static inline int intel_fbdev_init(struct drm_device *dev)
return 0; return 0;
} }
static inline void intel_fbdev_initial_config(struct drm_device *dev) static inline void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
{ {
} }
...@@ -1093,6 +1108,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); ...@@ -1093,6 +1108,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
void intel_flush_primary_plane(struct drm_i915_private *dev_priv, void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane); enum plane plane);
int intel_plane_set_property(struct drm_plane *plane,
struct drm_property *prop,
uint64_t val);
int intel_plane_restore(struct drm_plane *plane); int intel_plane_restore(struct drm_plane *plane);
void intel_plane_disable(struct drm_plane *plane); void intel_plane_disable(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data, int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
......
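The flip_queued_ring/flip_queued_seqno/flip_queued_vblank/flip_ready_vblank fields added to struct intel_unpin_work above back the "check for a stalled page flip after each vblank" work; the detection and recovery code itself sits in a hunk that is not shown here. Purely as a hypothetical sketch of the idea (helper name and threshold invented for illustration):

/* Hypothetical illustration only: a queued flip that still has not
 * completed several vblanks after it was issued is treated as stuck,
 * so intel_check_page_flip() can kick the recovery path. */
static bool example_flip_looks_stuck(const struct intel_unpin_work *work,
                                     int current_vblank)
{
        if (!work->enable_stall_check)
                return false;

        /* "3" is an arbitrary threshold chosen for this sketch */
        return current_vblank - work->flip_queued_vblank >= 3;
}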
...@@ -85,7 +85,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = { ...@@ -85,7 +85,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
{ {
.type = INTEL_DVO_CHIP_TMDS, .type = INTEL_DVO_CHIP_TMDS,
.name = "ns2501", .name = "ns2501",
.dvo_reg = DVOC, .dvo_reg = DVOB,
.slave_addr = NS2501_ADDR, .slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops, .dev_ops = &ns2501_ops,
} }
...@@ -185,12 +185,13 @@ static void intel_enable_dvo(struct intel_encoder *encoder) ...@@ -185,12 +185,13 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
u32 dvo_reg = intel_dvo->dev.dvo_reg; u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg); u32 temp = I915_READ(dvo_reg);
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&crtc->config.requested_mode, &crtc->config.requested_mode,
&crtc->config.adjusted_mode); &crtc->config.adjusted_mode);
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
} }
...@@ -226,10 +227,6 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode) ...@@ -226,10 +227,6 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)
intel_crtc_update_dpms(crtc); intel_crtc_update_dpms(crtc);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&config->requested_mode,
&config->adjusted_mode);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true); intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
} else { } else {
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false); intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
* David Airlie * David Airlie
*/ */
#include <linux/async.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/console.h> #include <linux/console.h>
...@@ -332,24 +333,6 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, ...@@ -332,24 +333,6 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
int num_connectors_enabled = 0; int num_connectors_enabled = 0;
int num_connectors_detected = 0; int num_connectors_detected = 0;
/*
* If the user specified any force options, just bail here
* and use that config.
*/
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
if (!enabled[i])
continue;
if (connector->force != DRM_FORCE_UNSPECIFIED)
return false;
}
save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
GFP_KERNEL); GFP_KERNEL);
if (!save_enabled) if (!save_enabled)
...@@ -375,8 +358,18 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, ...@@ -375,8 +358,18 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
continue; continue;
} }
if (connector->force == DRM_FORCE_OFF) {
DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
connector->name);
enabled[i] = false;
continue;
}
encoder = connector->encoder; encoder = connector->encoder;
if (!encoder || WARN_ON(!encoder->crtc)) { if (!encoder || WARN_ON(!encoder->crtc)) {
if (connector->force > DRM_FORCE_OFF)
goto bail;
DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
connector->name); connector->name);
enabled[i] = false; enabled[i] = false;
...@@ -395,8 +388,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, ...@@ -395,8 +388,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
for (j = 0; j < fb_helper->connector_count; j++) { for (j = 0; j < fb_helper->connector_count; j++) {
if (crtcs[j] == new_crtc) { if (crtcs[j] == new_crtc) {
DRM_DEBUG_KMS("fallback: cloned configuration\n"); DRM_DEBUG_KMS("fallback: cloned configuration\n");
fallback = true; goto bail;
goto out;
} }
} }
...@@ -467,8 +459,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, ...@@ -467,8 +459,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
fallback = true; fallback = true;
} }
out:
if (fallback) { if (fallback) {
bail:
DRM_DEBUG_KMS("Not using firmware configuration\n"); DRM_DEBUG_KMS("Not using firmware configuration\n");
memcpy(enabled, save_enabled, dev->mode_config.num_connector); memcpy(enabled, save_enabled, dev->mode_config.num_connector);
kfree(save_enabled); kfree(save_enabled);
...@@ -679,9 +671,9 @@ int intel_fbdev_init(struct drm_device *dev) ...@@ -679,9 +671,9 @@ int intel_fbdev_init(struct drm_device *dev)
return 0; return 0;
} }
void intel_fbdev_initial_config(struct drm_device *dev) void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = data;
struct intel_fbdev *ifbdev = dev_priv->fbdev; struct intel_fbdev *ifbdev = dev_priv->fbdev;
/* Due to peculiar init order wrt to hpd handling this is separate. */ /* Due to peculiar init order wrt to hpd handling this is separate. */
...@@ -696,6 +688,7 @@ void intel_fbdev_fini(struct drm_device *dev) ...@@ -696,6 +688,7 @@ void intel_fbdev_fini(struct drm_device *dev)
flush_work(&dev_priv->fbdev_suspend_work); flush_work(&dev_priv->fbdev_suspend_work);
async_synchronize_full();
intel_fbdev_destroy(dev, dev_priv->fbdev); intel_fbdev_destroy(dev, dev_priv->fbdev);
kfree(dev_priv->fbdev); kfree(dev_priv->fbdev);
dev_priv->fbdev = NULL; dev_priv->fbdev = NULL;
......
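intel_fbdev_initial_config() now has the signature of an async_func_t, and intel_fbdev_fini() waits for it with async_synchronize_full(). The call site that schedules it is not in the hunks shown here; a hedged sketch of what such a caller looks like with the <linux/async.h> API (the example_* name is invented):

#include <linux/async.h>

/* Sketch only: kick off the initial fbdev configuration asynchronously
 * at driver load; intel_fbdev_initial_config(data, cookie) then runs
 * from an async worker with data == dev_priv. */
static void example_schedule_fbdev_config(struct drm_i915_private *dev_priv)
{
        async_schedule(intel_fbdev_initial_config, dev_priv);
}

/* No extra work is needed at teardown: intel_fbdev_fini() above already
 * calls async_synchronize_full() before destroying the fbdev. */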
...@@ -971,104 +971,117 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, ...@@ -971,104 +971,117 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
return true; return true;
} }
static enum drm_connector_status static void
intel_hdmi_detect(struct drm_connector *connector, bool force) intel_hdmi_unset_edid(struct drm_connector *connector)
{ {
struct drm_device *dev = connector->dev;
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = dev->dev_private;
struct edid *edid;
enum intel_display_power_domain power_domain;
enum drm_connector_status status = connector_status_disconnected;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", intel_hdmi->has_hdmi_sink = false;
connector->base.id, connector->name); intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
kfree(to_intel_connector(connector)->detect_edid);
to_intel_connector(connector)->detect_edid = NULL;
}
static bool
intel_hdmi_set_edid(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *intel_encoder =
&hdmi_to_dig_port(intel_hdmi)->base;
enum intel_display_power_domain power_domain;
struct edid *edid;
bool connected = false;
power_domain = intel_display_port_power_domain(intel_encoder); power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain); intel_display_power_get(dev_priv, power_domain);
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
edid = drm_get_edid(connector, edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv, intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus)); intel_hdmi->ddc_bus));
if (edid) { intel_display_power_put(dev_priv, power_domain);
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected; to_intel_connector(connector)->detect_edid = edid;
if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI) if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
intel_hdmi->has_hdmi_sink = intel_hdmi->rgb_quant_range_selectable =
drm_detect_hdmi_monitor(edid); drm_rgb_quant_range_selectable(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
intel_hdmi->rgb_quant_range_selectable =
drm_rgb_quant_range_selectable(edid);
}
kfree(edid);
}
if (status == connector_status_connected) { intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
intel_hdmi->has_audio = intel_hdmi->has_audio =
(intel_hdmi->force_audio == HDMI_AUDIO_ON); intel_hdmi->force_audio == HDMI_AUDIO_ON;
intel_encoder->type = INTEL_OUTPUT_HDMI;
if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
intel_hdmi->has_hdmi_sink =
drm_detect_hdmi_monitor(edid);
connected = true;
} }
intel_display_power_put(dev_priv, power_domain); return connected;
}
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
intel_hdmi_unset_edid(connector);
if (intel_hdmi_set_edid(connector)) {
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
status = connector_status_connected;
} else
status = connector_status_disconnected;
return status; return status;
} }
static int intel_hdmi_get_modes(struct drm_connector *connector) static void
intel_hdmi_force(struct drm_connector *connector)
{ {
struct intel_encoder *intel_encoder = intel_attached_encoder(connector); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
enum intel_display_power_domain power_domain;
int ret;
/* We should parse the EDID data and find out if it's an HDMI sink so DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
* we can send audio to it. connector->base.id, connector->name);
*/
power_domain = intel_display_port_power_domain(intel_encoder); intel_hdmi_unset_edid(connector);
intel_display_power_get(dev_priv, power_domain);
ret = intel_ddc_get_modes(connector, if (connector->status != connector_status_connected)
intel_gmbus_get_adapter(dev_priv, return;
intel_hdmi->ddc_bus));
intel_display_power_put(dev_priv, power_domain); intel_hdmi_set_edid(connector);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
}
return ret; static int intel_hdmi_get_modes(struct drm_connector *connector)
{
struct edid *edid;
edid = to_intel_connector(connector)->detect_edid;
if (edid == NULL)
return 0;
return intel_connector_update_modes(connector, edid);
} }
static bool static bool
intel_hdmi_detect_audio(struct drm_connector *connector) intel_hdmi_detect_audio(struct drm_connector *connector)
{ {
struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
enum intel_display_power_domain power_domain;
struct edid *edid;
bool has_audio = false; bool has_audio = false;
struct edid *edid;
power_domain = intel_display_port_power_domain(intel_encoder); edid = to_intel_connector(connector)->detect_edid;
intel_display_power_get(dev_priv, power_domain); if (edid && edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
}
intel_display_power_put(dev_priv, power_domain);
return has_audio; return has_audio;
} }
...@@ -1488,6 +1501,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder) ...@@ -1488,6 +1501,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
static void intel_hdmi_destroy(struct drm_connector *connector) static void intel_hdmi_destroy(struct drm_connector *connector)
{ {
intel_hdmi_unset_edid(connector);
drm_connector_cleanup(connector); drm_connector_cleanup(connector);
kfree(connector); kfree(connector);
} }
...@@ -1495,6 +1509,7 @@ static void intel_hdmi_destroy(struct drm_connector *connector) ...@@ -1495,6 +1509,7 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs intel_hdmi_connector_funcs = { static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = intel_connector_dpms, .dpms = intel_connector_dpms,
.detect = intel_hdmi_detect, .detect = intel_hdmi_detect,
.force = intel_hdmi_force,
.fill_modes = drm_helper_probe_single_connector_modes, .fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property, .set_property = intel_hdmi_set_property,
.destroy = intel_hdmi_destroy, .destroy = intel_hdmi_destroy,
......
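intel_hdmi_get_modes() now just hands the EDID cached by the last detect cycle to intel_connector_update_modes(), which lives in intel_modes.c and is not part of this diff. From memory it boils down to roughly the following, so the mode list, the EDID property and the audio ELD all come from the single cached read:

/* Rough paraphrase for orientation only, not taken from this diff. */
int intel_connector_update_modes(struct drm_connector *connector,
                                 struct edid *edid)
{
        int ret;

        drm_mode_connector_update_edid_property(connector, edid);
        ret = drm_add_edid_modes(connector, edid);
        drm_edid_to_eld(connector, edid);

        return ret;
}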
...@@ -1217,8 +1217,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring) ...@@ -1217,8 +1217,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{ {
int ret; int ret;
struct intel_context *dctx = ring->default_context;
struct drm_i915_gem_object *dctx_obj;
/* Intentionally left blank. */ /* Intentionally left blank. */
ring->buffer = NULL; ring->buffer = NULL;
...@@ -1232,18 +1230,6 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin ...@@ -1232,18 +1230,6 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
spin_lock_init(&ring->execlist_lock); spin_lock_init(&ring->execlist_lock);
ring->next_context_status_buffer = 0; ring->next_context_status_buffer = 0;
ret = intel_lr_context_deferred_create(dctx, ring);
if (ret)
return ret;
/* The status page is offset 0 from the context object in LRCs. */
dctx_obj = dctx->engine[ring->id].state;
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
if (ring->status_page.page_addr == NULL)
return -ENOMEM;
ring->status_page.obj = dctx_obj;
ret = i915_cmd_parser_init_ring(ring); ret = i915_cmd_parser_init_ring(ring);
if (ret) if (ret)
return ret; return ret;
...@@ -1254,7 +1240,9 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin ...@@ -1254,7 +1240,9 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
return ret; return ret;
} }
return 0; ret = intel_lr_context_deferred_create(ring->default_context, ring);
return ret;
} }
static int logical_render_ring_init(struct drm_device *dev) static int logical_render_ring_init(struct drm_device *dev)
...@@ -1448,16 +1436,53 @@ int intel_logical_rings_init(struct drm_device *dev) ...@@ -1448,16 +1436,53 @@ int intel_logical_rings_init(struct drm_device *dev)
return ret; return ret;
} }
int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
struct render_state so;
struct drm_i915_file_private *file_priv = ctx->file_priv;
struct drm_file *file = file_priv ? file_priv->file : NULL;
int ret;
ret = i915_gem_render_state_prepare(ring, &so);
if (ret)
return ret;
if (so.rodata == NULL)
return 0;
ret = ring->emit_bb_start(ringbuf,
so.ggtt_offset,
I915_DISPATCH_SECURE);
if (ret)
goto out;
i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
ret = __i915_add_request(ring, file, so.obj, NULL);
/* intel_logical_ring_add_request moves object to inactive if it
* fails */
out:
i915_gem_render_state_fini(&so);
return ret;
}
static int static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj, populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf) struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
{ {
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *ring_obj = ringbuf->obj; struct drm_i915_gem_object *ring_obj = ringbuf->obj;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
struct page *page; struct page *page;
uint32_t *reg_state; uint32_t *reg_state;
int ret; int ret;
if (!ppgtt)
ppgtt = dev_priv->mm.aliasing_ppgtt;
ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true); ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
if (ret) { if (ret) {
DRM_DEBUG_DRIVER("Could not set to CPU domain\n"); DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
...@@ -1687,6 +1712,29 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, ...@@ -1687,6 +1712,29 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
ctx->engine[ring->id].ringbuf = ringbuf; ctx->engine[ring->id].ringbuf = ringbuf;
ctx->engine[ring->id].state = ctx_obj; ctx->engine[ring->id].state = ctx_obj;
if (ctx == ring->default_context) {
/* The status page is offset 0 from the default context object
* in LRC mode. */
ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(ctx_obj);
ring->status_page.page_addr =
kmap(sg_page(ctx_obj->pages->sgl));
if (ring->status_page.page_addr == NULL)
return -ENOMEM;
ring->status_page.obj = ctx_obj;
}
if (ring->id == RCS && !ctx->rcs_initialized) {
ret = intel_lr_context_render_state_init(ring, ctx);
if (ret) {
DRM_ERROR("Init render state failed: %d\n", ret);
ctx->engine[ring->id].ringbuf = NULL;
ctx->engine[ring->id].state = NULL;
intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
ctx->rcs_initialized = true;
}
return 0; return 0;
error: error:
......
...@@ -62,6 +62,8 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf, ...@@ -62,6 +62,8 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords); int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
/* Logical Ring Contexts */ /* Logical Ring Contexts */
int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
struct intel_context *ctx);
void intel_lr_context_free(struct intel_context *ctx); void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx, int intel_lr_context_deferred_create(struct intel_context *ctx,
struct intel_engine_cs *ring); struct intel_engine_cs *ring);
......
...@@ -751,6 +751,8 @@ void intel_panel_disable_backlight(struct intel_connector *connector) ...@@ -751,6 +751,8 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
spin_lock_irqsave(&dev_priv->backlight_lock, flags); spin_lock_irqsave(&dev_priv->backlight_lock, flags);
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
panel->backlight.enabled = false; panel->backlight.enabled = false;
dev_priv->display.disable_backlight(connector); dev_priv->display.disable_backlight(connector);
...@@ -957,6 +959,8 @@ void intel_panel_enable_backlight(struct intel_connector *connector) ...@@ -957,6 +959,8 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
dev_priv->display.enable_backlight(connector); dev_priv->display.enable_backlight(connector);
panel->backlight.enabled = true; panel->backlight.enabled = true;
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_UNBLANK;
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
} }
...@@ -965,6 +969,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector) ...@@ -965,6 +969,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
static int intel_backlight_device_update_status(struct backlight_device *bd) static int intel_backlight_device_update_status(struct backlight_device *bd)
{ {
struct intel_connector *connector = bl_get_data(bd); struct intel_connector *connector = bl_get_data(bd);
struct intel_panel *panel = &connector->panel;
struct drm_device *dev = connector->base.dev; struct drm_device *dev = connector->base.dev;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
...@@ -972,6 +977,23 @@ static int intel_backlight_device_update_status(struct backlight_device *bd) ...@@ -972,6 +977,23 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
bd->props.brightness, bd->props.max_brightness); bd->props.brightness, bd->props.max_brightness);
intel_panel_set_backlight(connector, bd->props.brightness, intel_panel_set_backlight(connector, bd->props.brightness,
bd->props.max_brightness); bd->props.max_brightness);
/*
* Allow flipping bl_power as a sub-state of enabled. Sadly the
* backlight class device does not make it easy to differentiate
* between callbacks for brightness and bl_power, so our backlight_power
* callback needs to take this into account.
*/
if (panel->backlight.enabled) {
if (panel->backlight_power) {
bool enable = bd->props.power == FB_BLANK_UNBLANK &&
bd->props.brightness != 0;
panel->backlight_power(connector, enable);
}
} else {
bd->props.power = FB_BLANK_POWERDOWN;
}
drm_modeset_unlock(&dev->mode_config.connection_mutex); drm_modeset_unlock(&dev->mode_config.connection_mutex);
return 0; return 0;
} }
...@@ -1023,6 +1045,11 @@ static int intel_backlight_device_register(struct intel_connector *connector) ...@@ -1023,6 +1045,11 @@ static int intel_backlight_device_register(struct intel_connector *connector)
panel->backlight.level, panel->backlight.level,
props.max_brightness); props.max_brightness);
if (panel->backlight.enabled)
props.power = FB_BLANK_UNBLANK;
else
props.power = FB_BLANK_POWERDOWN;
/* /*
* Note: using the same name independent of the connector prevents * Note: using the same name independent of the connector prevents
* registration of multiple backlight devices in the driver. * registration of multiple backlight devices in the driver.
...@@ -1203,7 +1230,7 @@ static int vlv_setup_backlight(struct intel_connector *connector) ...@@ -1203,7 +1230,7 @@ static int vlv_setup_backlight(struct intel_connector *connector)
enum pipe pipe; enum pipe pipe;
u32 ctl, ctl2, val; u32 ctl, ctl2, val;
for_each_pipe(pipe) { for_each_pipe(dev_priv, pipe) {
u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe)); u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
/* Skip if the modulation freq is already set */ /* Skip if the modulation freq is already set */
......
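With the bl_power handling above, whether the backlight is physically on depends on three inputs: the driver's enabled flag, the sysfs bl_power value and the brightness. Restated as a stand-alone helper purely for illustration (the helper name is invented):

#include <linux/backlight.h>
#include <linux/fb.h>

/* Mirrors the decision made in intel_backlight_device_update_status():
 * the panel->backlight_power() hook is only asked to switch the
 * backlight on when the pipe-level backlight is enabled, userspace has
 * not blanked it via bl_power, and the brightness is non-zero. */
static bool example_backlight_should_be_on(const struct backlight_device *bd,
                                           bool panel_backlight_enabled)
{
        return panel_backlight_enabled &&
               bd->props.power == FB_BLANK_UNBLANK &&
               bd->props.brightness != 0;
}

Writing a blanking value such as 4 (FB_BLANK_POWERDOWN) into /sys/class/backlight/.../bl_power therefore switches the backlight off without losing the stored brightness.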
This diff is collapsed.
...@@ -24,13 +24,7 @@ ...@@ -24,13 +24,7 @@
#ifndef _INTEL_RENDERSTATE_H #ifndef _INTEL_RENDERSTATE_H
#define _INTEL_RENDERSTATE_H #define _INTEL_RENDERSTATE_H
#include <linux/types.h> #include "i915_drv.h"
struct intel_renderstate_rodata {
const u32 *reloc;
const u32 *batch;
const u32 batch_items;
};
extern const struct intel_renderstate_rodata gen6_null_state; extern const struct intel_renderstate_rodata gen6_null_state;
extern const struct intel_renderstate_rodata gen7_null_state; extern const struct intel_renderstate_rodata gen7_null_state;
......
...@@ -444,7 +444,14 @@ gen8_render_ring_flush(struct intel_engine_cs *ring, ...@@ -444,7 +444,14 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
return ret; return ret;
} }
return gen8_emit_pipe_control(ring, flags, scratch_addr); ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
if (ret)
return ret;
if (!invalidate_domains && flush_domains)
return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
return 0;
} }
static void ring_write_tail(struct intel_engine_cs *ring, static void ring_write_tail(struct intel_engine_cs *ring,
...@@ -556,6 +563,14 @@ static int init_ring_common(struct intel_engine_cs *ring) ...@@ -556,6 +563,14 @@ static int init_ring_common(struct intel_engine_cs *ring)
* also enforces ordering), otherwise the hw might lose the new ring * also enforces ordering), otherwise the hw might lose the new ring
* register values. */ * register values. */
I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj)); I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
/* WaClearRingBufHeadRegAtInit:ctg,elk */
if (I915_READ_HEAD(ring))
DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
ring->name, I915_READ_HEAD(ring));
I915_WRITE_HEAD(ring, 0);
(void)I915_READ_HEAD(ring);
I915_WRITE_CTL(ring, I915_WRITE_CTL(ring,
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID); | RING_VALID);
...@@ -650,6 +665,146 @@ intel_init_pipe_control(struct intel_engine_cs *ring) ...@@ -650,6 +665,146 @@ intel_init_pipe_control(struct intel_engine_cs *ring)
return ret; return ret;
} }
static inline void intel_ring_emit_wa(struct intel_engine_cs *ring,
u32 addr, u32 value)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (WARN_ON(dev_priv->num_wa_regs >= I915_MAX_WA_REGS))
return;
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, addr);
intel_ring_emit(ring, value);
dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr;
dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF;
/* value is updated with the status of remaining bits of this
* register when it is read from debugfs file
*/
dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value;
dev_priv->num_wa_regs++;
return;
}
static int bdw_init_workarounds(struct intel_engine_cs *ring)
{
int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
/*
* workarounds applied in this fn are part of register state context,
* they need to be re-initialized following a gpu reset, suspend/resume, or
* module reload.
*/
dev_priv->num_wa_regs = 0;
memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
/*
* update the number of dwords required based on the
* actual number of workarounds applied
*/
ret = intel_ring_begin(ring, 24);
if (ret)
return ret;
/* WaDisablePartialInstShootdown:bdw */
/* WaDisableThreadStallDopClockGating:bdw */
/* FIXME: Unclear whether we really need this on production bdw. */
intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE
| STALL_DOP_GATING_DISABLE));
/* WaDisableDopClockGating:bdw May not be needed for production */
intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
/*
* This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
* pre-production hardware
*/
intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS
| GEN8_SAMPLER_POWER_BYPASS_DIS));
intel_ring_emit_wa(ring, GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
intel_ring_emit_wa(ring, COMMON_SLICE_CHICKEN2,
_MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
/* Use Force Non-Coherent whenever executing a 3D context. This is a
* workaround for a possible hang in the unlikely event a TLB
* invalidation occurs during a PSD flush.
*/
intel_ring_emit_wa(ring, HDC_CHICKEN0,
_MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
/* Wa4x4STCOptimizationDisable:bdw */
intel_ring_emit_wa(ring, CACHE_MODE_1,
_MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
*
* Note that PS/WM thread counts depend on the WIZ hashing
* disable bit, which we don't touch here, but it's good
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
*/
intel_ring_emit_wa(ring, GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
intel_ring_advance(ring);
DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n",
dev_priv->num_wa_regs);
return 0;
}
static int chv_init_workarounds(struct intel_engine_cs *ring)
{
int ret;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
/*
* workarounds applied in this fn are part of register state context,
* they need to be re-initialized following a gpu reset, suspend/resume, or
* module reload.
*/
dev_priv->num_wa_regs = 0;
memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
ret = intel_ring_begin(ring, 12);
if (ret)
return ret;
/* WaDisablePartialInstShootdown:chv */
intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
/* WaDisableThreadStallDopClockGating:chv */
intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
/* WaDisableDopClockGating:chv (pre-production hw) */
intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
/* WaDisableSamplerPowerBypass:chv (pre-production hw) */
intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3,
_MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
intel_ring_advance(ring);
return 0;
}
static int init_render_ring(struct intel_engine_cs *ring) static int init_render_ring(struct intel_engine_cs *ring)
{ {
struct drm_device *dev = ring->dev; struct drm_device *dev = ring->dev;
...@@ -2148,6 +2303,10 @@ int intel_init_render_ring_buffer(struct drm_device *dev) ...@@ -2148,6 +2303,10 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
dev_priv->semaphore_obj = obj; dev_priv->semaphore_obj = obj;
} }
} }
if (IS_CHERRYVIEW(dev))
ring->init_context = chv_init_workarounds;
else
ring->init_context = bdw_init_workarounds;
ring->add_request = gen6_add_request; ring->add_request = gen6_add_request;
ring->flush = gen8_render_ring_flush; ring->flush = gen8_render_ring_flush;
ring->irq_get = gen8_ring_get_irq; ring->irq_get = gen8_ring_get_irq;
......
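Every value written by the workaround helpers above goes through _MASKED_BIT_ENABLE() because these chicken registers are "masked" registers: the top 16 bits of the written value select which of the bottom 16 bits actually change. For reference, the helpers are defined in i915_reg.h roughly as follows (quoted from memory, not part of this diff):

/* A write of _MASKED_BIT_ENABLE(bit) sets that bit and leaves every
 * other bit of the register untouched; _MASKED_BIT_DISABLE(bit) clears
 * it.  Writes whose mask half is zero are ignored by the hardware. */
#define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a)  ((a) << 16)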
...@@ -148,6 +148,8 @@ struct intel_engine_cs { ...@@ -148,6 +148,8 @@ struct intel_engine_cs {
int (*init)(struct intel_engine_cs *ring); int (*init)(struct intel_engine_cs *ring);
int (*init_context)(struct intel_engine_cs *ring);
void (*write_tail)(struct intel_engine_cs *ring, void (*write_tail)(struct intel_engine_cs *ring,
u32 value); u32 value);
int __must_check (*flush)(struct intel_engine_cs *ring, int __must_check (*flush)(struct intel_engine_cs *ring,
......
...@@ -1218,9 +1218,9 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data, ...@@ -1218,9 +1218,9 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
return ret; return ret;
} }
static int intel_plane_set_property(struct drm_plane *plane, int intel_plane_set_property(struct drm_plane *plane,
struct drm_property *prop, struct drm_property *prop,
uint64_t val) uint64_t val)
{ {
struct drm_device *dev = plane->dev; struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_plane *intel_plane = to_intel_plane(plane);
...@@ -1232,6 +1232,9 @@ static int intel_plane_set_property(struct drm_plane *plane, ...@@ -1232,6 +1232,9 @@ static int intel_plane_set_property(struct drm_plane *plane,
if (hweight32(val & 0xf) != 1) if (hweight32(val & 0xf) != 1)
return -EINVAL; return -EINVAL;
if (intel_plane->rotation == val)
return 0;
old_val = intel_plane->rotation; old_val = intel_plane->rotation;
intel_plane->rotation = val; intel_plane->rotation = val;
ret = intel_plane_restore(plane); ret = intel_plane_restore(plane);
...@@ -1249,7 +1252,7 @@ int intel_plane_restore(struct drm_plane *plane) ...@@ -1249,7 +1252,7 @@ int intel_plane_restore(struct drm_plane *plane)
if (!plane->crtc || !plane->fb) if (!plane->crtc || !plane->fb)
return 0; return 0;
return intel_update_plane(plane, plane->crtc, plane->fb, return plane->funcs->update_plane(plane, plane->crtc, plane->fb,
intel_plane->crtc_x, intel_plane->crtc_y, intel_plane->crtc_x, intel_plane->crtc_y,
intel_plane->crtc_w, intel_plane->crtc_h, intel_plane->crtc_w, intel_plane->crtc_h,
intel_plane->src_x, intel_plane->src_y, intel_plane->src_x, intel_plane->src_y,
...@@ -1375,10 +1378,10 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) ...@@ -1375,10 +1378,10 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->plane = plane; intel_plane->plane = plane;
intel_plane->rotation = BIT(DRM_ROTATE_0); intel_plane->rotation = BIT(DRM_ROTATE_0);
possible_crtcs = (1 << pipe); possible_crtcs = (1 << pipe);
ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs, ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs, &intel_plane_funcs,
plane_formats, num_plane_formats, plane_formats, num_plane_formats,
false); DRM_PLANE_TYPE_OVERLAY);
if (ret) { if (ret) {
kfree(intel_plane); kfree(intel_plane);
goto out; goto out;
......
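The sprite planes are now registered through drm_universal_plane_init() with an explicit DRM_PLANE_TYPE_OVERLAY, so userspace that enables the universal-planes cap can tell overlay, primary and cursor planes apart. For orientation, the helper's signature around this kernel version was roughly the following (later kernels grew extra parameters); the only new argument compared to drm_plane_init() is the plane type:

enum drm_plane_type {
        DRM_PLANE_TYPE_OVERLAY,
        DRM_PLANE_TYPE_PRIMARY,
        DRM_PLANE_TYPE_CURSOR,
};

int drm_universal_plane_init(struct drm_device *dev,
                             struct drm_plane *plane,
                             unsigned long possible_crtcs,
                             const struct drm_plane_funcs *funcs,
                             const uint32_t *formats,
                             uint32_t format_count,
                             enum drm_plane_type type);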
...@@ -101,7 +101,7 @@ static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv, ...@@ -101,7 +101,7 @@ static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
{ {
u32 forcewake_ack; u32 forcewake_ack;
if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev)) if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
forcewake_ack = FORCEWAKE_ACK_HSW; forcewake_ack = FORCEWAKE_ACK_HSW;
else else
forcewake_ack = FORCEWAKE_MT_ACK; forcewake_ack = FORCEWAKE_MT_ACK;
...@@ -334,7 +334,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) ...@@ -334,7 +334,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
else if (IS_GEN6(dev) || IS_GEN7(dev)) else if (IS_GEN6(dev) || IS_GEN7(dev))
__gen6_gt_force_wake_reset(dev_priv); __gen6_gt_force_wake_reset(dev_priv);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev)) if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
__gen7_gt_force_wake_mt_reset(dev_priv); __gen7_gt_force_wake_mt_reset(dev_priv);
if (restore) { /* If reset with a user forcewake, try to restore */ if (restore) { /* If reset with a user forcewake, try to restore */
...@@ -838,7 +838,7 @@ void intel_uncore_init(struct drm_device *dev) ...@@ -838,7 +838,7 @@ void intel_uncore_init(struct drm_device *dev)
if (IS_VALLEYVIEW(dev)) { if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put; dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
} else if (IS_HASWELL(dev) || IS_GEN8(dev)) { } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get; dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put; dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
} else if (IS_IVYBRIDGE(dev)) { } else if (IS_IVYBRIDGE(dev)) {
......
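The IS_GEN8() -> IS_BROADWELL() substitutions matter because Cherryview is also a gen8 part but uses the Valleyview-style forcewake rather than the Haswell/Broadwell MT path. Paraphrased from the i915 headers of this era (treat this as an assumption, not part of the diff), the two gen8 platforms are told apart like this:

/* gen8 splits into two rather different platforms: */
#define IS_BROADWELL(dev)       (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_CHERRYVIEW(dev)      (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))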
...@@ -232,8 +232,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector) ...@@ -232,8 +232,8 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
/***** general DP utility functions *****/ /***** general DP utility functions *****/
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5 #define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPH_LEVEL_3
static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count, int lane_count,
......
...@@ -533,9 +533,9 @@ int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link, ...@@ -533,9 +533,9 @@ int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
for (i = 0; i < link->num_lanes; i++) for (i = 0; i < link->num_lanes; i++)
values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED | values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
DP_TRAIN_PRE_EMPHASIS_0 | DP_TRAIN_PRE_EMPH_LEVEL_0 |
DP_TRAIN_MAX_SWING_REACHED | DP_TRAIN_MAX_SWING_REACHED |
DP_TRAIN_VOLTAGE_SWING_400; DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
err = drm_dp_dpcd_write(&dpaux->aux, DP_TRAINING_LANE0_SET, values, err = drm_dp_dpcd_write(&dpaux->aux, DP_TRAINING_LANE0_SET, values,
link->num_lanes); link->num_lanes);
......
...@@ -190,16 +190,16 @@ ...@@ -190,16 +190,16 @@
# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 # define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 # define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
# define DP_TRAIN_MAX_SWING_REACHED (1 << 2) # define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0) # define DP_TRAIN_VOLTAGE_SWING_LEVEL_0 (0 << 0)
# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0) # define DP_TRAIN_VOLTAGE_SWING_LEVEL_1 (1 << 0)
# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0) # define DP_TRAIN_VOLTAGE_SWING_LEVEL_2 (2 << 0)
# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0) # define DP_TRAIN_VOLTAGE_SWING_LEVEL_3 (3 << 0)
# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) # define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3) # define DP_TRAIN_PRE_EMPH_LEVEL_0 (0 << 3)
# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3) # define DP_TRAIN_PRE_EMPH_LEVEL_1 (1 << 3)
# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3) # define DP_TRAIN_PRE_EMPH_LEVEL_2 (2 << 3)
# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3) # define DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3)
# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 # define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) # define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
......
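The drm_dp_helper.h hunk above is the rename that the driver-side hunks elsewhere in this merge (radeon and tegra above, among others) follow: the register encodings stay the same, only the names move from millivolt/dB figures to the level numbers used by newer DP/eDP specs. Written out as compatibility aliases purely for illustration (these defines are not part of the series):

#define DP_TRAIN_VOLTAGE_SWING_400      DP_TRAIN_VOLTAGE_SWING_LEVEL_0
#define DP_TRAIN_VOLTAGE_SWING_600      DP_TRAIN_VOLTAGE_SWING_LEVEL_1
#define DP_TRAIN_VOLTAGE_SWING_800      DP_TRAIN_VOLTAGE_SWING_LEVEL_2
#define DP_TRAIN_VOLTAGE_SWING_1200     DP_TRAIN_VOLTAGE_SWING_LEVEL_3

#define DP_TRAIN_PRE_EMPHASIS_0         DP_TRAIN_PRE_EMPH_LEVEL_0
#define DP_TRAIN_PRE_EMPHASIS_3_5       DP_TRAIN_PRE_EMPH_LEVEL_1
#define DP_TRAIN_PRE_EMPHASIS_6         DP_TRAIN_PRE_EMPH_LEVEL_2
#define DP_TRAIN_PRE_EMPHASIS_9_5       DP_TRAIN_PRE_EMPH_LEVEL_3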