提交 605b28c8 编写于 作者: D Dave Airlie

Merge tag 'drm-intel-next-2016-04-11' of git://anongit.freedesktop.org/drm-intel into drm-next

- make modeset hw state checker atomic aware (Maarten)
- close races in gpu stuck detection/seqno reading (Chris)
- tons&tons of small improvements from Chris Wilson all over the gem code
- more dsi/bxt work from Ramalingam&Jani
- macro polish from Joonas
- guc fw loading fixes (Arun&Dave)
- vmap notifier (acked by Andrew) + i915 support by Chris Wilson
- create bottom half for execlist irq processing (Chris Wilson)
- vlv/chv pll cleanup (Ville)
- rework DP detection, especially sink detection (Shubhangi Shrivastava)
- make color manager support fully atomic (Maarten)
- avoid livelock on chv in execlist irq handler (Chris)

* tag 'drm-intel-next-2016-04-11' of git://anongit.freedesktop.org/drm-intel: (82 commits)
  drm/i915: Update DRIVER_DATE to 20160411
  drm/i915: Avoid allocating a vmap arena for a single page
  drm,i915: Introduce drm_malloc_gfp()
  drm/i915/shrinker: Restrict vmap purge to objects with vmaps
  drm/i915: Refactor duplicate object vmap functions
  drm/i915: Consolidate common error handling in intel_pin_and_map_ringbuffer_obj
  drm/i915/dmabuf: Tighten struct_mutex for unmap_dma_buf
  drm/i915: implement WaClearTdlStateAckDirtyBits
  drm/i915/bxt: Reversed polarity of PORT_PLL_REF_SEL bit
  drm/i915: Rename hw state checker to hw state verifier.
  drm/i915: Move modeset state verifier calls.
  drm/i915: Make modeset state verifier take crtc as argument.
  drm/i915: Replace manual barrier() with READ_ONCE() in HWS accessor
  drm/i915: Use simplest form for flushing the single cacheline in the HWS
  drm/i915: Harden detection of missed interrupts
  drm/i915: Separate out the seqno-barrier from engine->get_seqno
  drm/i915: Remove forcewake dance from seqno/irq barrier on legacy gen6+
  drm/i915: Fixup the free space logic in ring_prepare
  drm/i915: Simplify check for idleness in hangcheck
  drm/i915: Apply a mb between emitting the request and hangcheck
  ...
...@@ -134,6 +134,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) ...@@ -134,6 +134,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
int pin_count = 0; int pin_count = 0;
enum intel_engine_id id; enum intel_engine_id id;
lockdep_assert_held(&obj->base.dev->struct_mutex);
seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ", seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
&obj->base, &obj->base,
obj->active ? "*" : " ", obj->active ? "*" : " ",
...@@ -202,8 +204,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) ...@@ -202,8 +204,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
uintptr_t list = (uintptr_t) node->info_ent->data; uintptr_t list = (uintptr_t) node->info_ent->data;
struct list_head *head; struct list_head *head;
struct drm_device *dev = node->minor->dev; struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_address_space *vm = &dev_priv->ggtt.base; struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma; struct i915_vma *vma;
u64 total_obj_size, total_gtt_size; u64 total_obj_size, total_gtt_size;
int count, ret; int count, ret;
...@@ -216,11 +218,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) ...@@ -216,11 +218,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
switch (list) { switch (list) {
case ACTIVE_LIST: case ACTIVE_LIST:
seq_puts(m, "Active:\n"); seq_puts(m, "Active:\n");
head = &vm->active_list; head = &ggtt->base.active_list;
break; break;
case INACTIVE_LIST: case INACTIVE_LIST:
seq_puts(m, "Inactive:\n"); seq_puts(m, "Inactive:\n");
head = &vm->inactive_list; head = &ggtt->base.inactive_list;
break; break;
default: default:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
...@@ -429,11 +431,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data) ...@@ -429,11 +431,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
{ {
struct drm_info_node *node = m->private; struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev; struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
u32 count, mappable_count, purgeable_count; u32 count, mappable_count, purgeable_count;
u64 size, mappable_size, purgeable_size; u64 size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_address_space *vm = &dev_priv->ggtt.base;
struct drm_file *file; struct drm_file *file;
struct i915_vma *vma; struct i915_vma *vma;
int ret; int ret;
...@@ -452,12 +454,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data) ...@@ -452,12 +454,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size); count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0; size = count = mappable_size = mappable_count = 0;
count_vmas(&vm->active_list, vm_link); count_vmas(&ggtt->base.active_list, vm_link);
seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n", seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size); count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0; size = count = mappable_size = mappable_count = 0;
count_vmas(&vm->inactive_list, vm_link); count_vmas(&ggtt->base.inactive_list, vm_link);
seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n", seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size); count, mappable_count, size, mappable_size);
...@@ -492,8 +494,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) ...@@ -492,8 +494,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, size); count, size);
seq_printf(m, "%llu [%llu] gtt total\n", seq_printf(m, "%llu [%llu] gtt total\n",
dev_priv->ggtt.base.total, ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
(u64)dev_priv->ggtt.mappable_end - dev_priv->ggtt.base.start);
seq_putc(m, '\n'); seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv); print_batch_pool_stats(m, dev_priv);
...@@ -597,7 +598,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) ...@@ -597,7 +598,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
engine->name, engine->name,
i915_gem_request_get_seqno(work->flip_queued_req), i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno, dev_priv->next_seqno,
engine->get_seqno(engine, true), engine->get_seqno(engine),
i915_gem_request_completed(work->flip_queued_req, true)); i915_gem_request_completed(work->flip_queued_req, true));
} else } else
seq_printf(m, "Flip not associated with any ring\n"); seq_printf(m, "Flip not associated with any ring\n");
...@@ -727,10 +728,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data) ...@@ -727,10 +728,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
static void i915_ring_seqno_info(struct seq_file *m, static void i915_ring_seqno_info(struct seq_file *m,
struct intel_engine_cs *engine) struct intel_engine_cs *engine)
{ {
if (engine->get_seqno) { seq_printf(m, "Current sequence (%s): %x\n",
seq_printf(m, "Current sequence (%s): %x\n", engine->name, engine->get_seqno(engine));
engine->name, engine->get_seqno(engine, false)); seq_printf(m, "Current user interrupts (%s): %x\n",
} engine->name, READ_ONCE(engine->user_interrupts));
} }
static int i915_gem_seqno_info(struct seq_file *m, void *data) static int i915_gem_seqno_info(struct seq_file *m, void *data)
...@@ -1345,8 +1346,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) ...@@ -1345,8 +1346,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv); intel_runtime_pm_get(dev_priv);
for_each_engine_id(engine, dev_priv, id) { for_each_engine_id(engine, dev_priv, id) {
seqno[id] = engine->get_seqno(engine, false);
acthd[id] = intel_ring_get_active_head(engine); acthd[id] = intel_ring_get_active_head(engine);
seqno[id] = engine->get_seqno(engine);
} }
i915_get_extra_instdone(dev, instdone); i915_get_extra_instdone(dev, instdone);
...@@ -1362,8 +1363,13 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) ...@@ -1362,8 +1363,13 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
for_each_engine_id(engine, dev_priv, id) { for_each_engine_id(engine, dev_priv, id) {
seq_printf(m, "%s:\n", engine->name); seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x]\n", seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
engine->hangcheck.seqno, seqno[id]); engine->hangcheck.seqno,
seqno[id],
engine->last_submitted_seqno);
seq_printf(m, "\tuser interrupts = %x [current %x]\n",
engine->hangcheck.user_interrupts,
READ_ONCE(engine->user_interrupts));
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd, (long long)engine->hangcheck.acthd,
(long long)acthd[id]); (long long)acthd[id]);
...@@ -1895,6 +1901,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) ...@@ -1895,6 +1901,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev; struct drm_device *dev = node->minor->dev;
struct intel_framebuffer *fbdev_fb = NULL; struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb; struct drm_framebuffer *drm_fb;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION #ifdef CONFIG_DRM_FBDEV_EMULATION
if (to_i915(dev)->fbdev) { if (to_i915(dev)->fbdev) {
...@@ -1929,6 +1940,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) ...@@ -1929,6 +1940,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
seq_putc(m, '\n'); seq_putc(m, '\n');
} }
mutex_unlock(&dev->mode_config.fb_lock); mutex_unlock(&dev->mode_config.fb_lock);
mutex_unlock(&dev->struct_mutex);
return 0; return 0;
} }
...@@ -2093,7 +2105,6 @@ static int i915_execlists(struct seq_file *m, void *data) ...@@ -2093,7 +2105,6 @@ static int i915_execlists(struct seq_file *m, void *data)
for_each_engine(engine, dev_priv) { for_each_engine(engine, dev_priv) {
struct drm_i915_gem_request *head_req = NULL; struct drm_i915_gem_request *head_req = NULL;
int count = 0; int count = 0;
unsigned long flags;
seq_printf(m, "%s\n", engine->name); seq_printf(m, "%s\n", engine->name);
...@@ -2120,13 +2131,13 @@ static int i915_execlists(struct seq_file *m, void *data) ...@@ -2120,13 +2131,13 @@ static int i915_execlists(struct seq_file *m, void *data)
i, status, ctx_id); i, status, ctx_id);
} }
spin_lock_irqsave(&engine->execlist_lock, flags); spin_lock_bh(&engine->execlist_lock);
list_for_each(cursor, &engine->execlist_queue) list_for_each(cursor, &engine->execlist_queue)
count++; count++;
head_req = list_first_entry_or_null(&engine->execlist_queue, head_req = list_first_entry_or_null(&engine->execlist_queue,
struct drm_i915_gem_request, struct drm_i915_gem_request,
execlist_link); execlist_link);
spin_unlock_irqrestore(&engine->execlist_lock, flags); spin_unlock_bh(&engine->execlist_lock);
seq_printf(m, "\t%d requests in queue\n", count); seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) { if (head_req) {
...@@ -2409,7 +2420,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) ...@@ -2409,7 +2420,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
u32 tmp, i; u32 tmp, i;
if (!HAS_GUC_UCODE(dev_priv->dev)) if (!HAS_GUC_UCODE(dev_priv))
return 0; return 0;
seq_printf(m, "GuC firmware status:\n"); seq_printf(m, "GuC firmware status:\n");
...@@ -2483,7 +2494,7 @@ static int i915_guc_info(struct seq_file *m, void *data) ...@@ -2483,7 +2494,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
u64 total = 0; u64 total = 0;
if (!HAS_GUC_SCHED(dev_priv->dev)) if (!HAS_GUC_SCHED(dev_priv))
return 0; return 0;
if (mutex_lock_interruptible(&dev->struct_mutex)) if (mutex_lock_interruptible(&dev->struct_mutex))
...@@ -2687,10 +2698,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) ...@@ -2687,10 +2698,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev; struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
if (!HAS_RUNTIME_PM(dev)) { if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "not supported\n"); seq_puts(m, "Runtime power management not supported\n");
return 0;
}
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "IRQs disabled: %s\n", seq_printf(m, "IRQs disabled: %s\n",
...@@ -2701,6 +2710,9 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) ...@@ -2701,6 +2710,9 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
#else #else
seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif #endif
seq_printf(m, "PCI device power state: %s [%d]\n",
pci_power_name(dev_priv->dev->pdev->current_state),
dev_priv->dev->pdev->current_state);
return 0; return 0;
} }
......
...@@ -493,9 +493,11 @@ static int i915_load_modeset_init(struct drm_device *dev) ...@@ -493,9 +493,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
* Some ports require correctly set-up hpd registers for detection to * Some ports require correctly set-up hpd registers for detection to
* work properly (leading to ghost connected connector status), e.g. VGA * work properly (leading to ghost connected connector status), e.g. VGA
* on gm45. Hence we can only set up the initial fbdev config after hpd * on gm45. Hence we can only set up the initial fbdev config after hpd
* irqs are fully enabled. We protect the fbdev initial config scanning * irqs are fully enabled. Now we should scan for the initial config
* against hotplug events by waiting in intel_fbdev_output_poll_changed * only once hotplug handling is enabled, but due to screwed-up locking
* until the asynchronous thread has finished. * around kms/fbdev init we can't protect the fdbev initial config
* scanning against hotplug events. Hence do this first and ignore the
* tiny window where we will loose hotplug notifactions.
*/ */
intel_fbdev_initial_config_async(dev); intel_fbdev_initial_config_async(dev);
...@@ -527,6 +529,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) ...@@ -527,6 +529,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{ {
struct apertures_struct *ap; struct apertures_struct *ap;
struct pci_dev *pdev = dev_priv->dev->pdev; struct pci_dev *pdev = dev_priv->dev->pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool primary; bool primary;
int ret; int ret;
...@@ -534,8 +537,8 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) ...@@ -534,8 +537,8 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap) if (!ap)
return -ENOMEM; return -ENOMEM;
ap->ranges[0].base = dev_priv->ggtt.mappable_base; ap->ranges[0].base = ggtt->mappable_base;
ap->ranges[0].size = dev_priv->ggtt.mappable_end; ap->ranges[0].size = ggtt->mappable_end;
primary = primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
...@@ -1170,6 +1173,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) ...@@ -1170,6 +1173,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
static int i915_driver_init_hw(struct drm_i915_private *dev_priv) static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
{ {
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t aperture_size; uint32_t aperture_size;
int ret; int ret;
...@@ -1178,7 +1182,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ...@@ -1178,7 +1182,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_device_info_runtime_init(dev); intel_device_info_runtime_init(dev);
ret = i915_gem_gtt_init(dev); ret = i915_ggtt_init_hw(dev);
if (ret) if (ret)
return ret; return ret;
...@@ -1187,13 +1191,13 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ...@@ -1187,13 +1191,13 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
ret = i915_kick_out_firmware_fb(dev_priv); ret = i915_kick_out_firmware_fb(dev_priv);
if (ret) { if (ret) {
DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
goto out_gtt; goto out_ggtt;
} }
ret = i915_kick_out_vgacon(dev_priv); ret = i915_kick_out_vgacon(dev_priv);
if (ret) { if (ret) {
DRM_ERROR("failed to remove conflicting VGA console\n"); DRM_ERROR("failed to remove conflicting VGA console\n");
goto out_gtt; goto out_ggtt;
} }
pci_set_master(dev->pdev); pci_set_master(dev->pdev);
...@@ -1213,17 +1217,17 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ...@@ -1213,17 +1217,17 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
aperture_size = dev_priv->ggtt.mappable_end; aperture_size = ggtt->mappable_end;
dev_priv->ggtt.mappable = ggtt->mappable =
io_mapping_create_wc(dev_priv->ggtt.mappable_base, io_mapping_create_wc(ggtt->mappable_base,
aperture_size); aperture_size);
if (dev_priv->ggtt.mappable == NULL) { if (!ggtt->mappable) {
ret = -EIO; ret = -EIO;
goto out_gtt; goto out_ggtt;
} }
dev_priv->ggtt.mtrr = arch_phys_wc_add(dev_priv->ggtt.mappable_base, ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
aperture_size); aperture_size);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
...@@ -1253,8 +1257,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ...@@ -1253,8 +1257,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
return 0; return 0;
out_gtt: out_ggtt:
i915_global_gtt_cleanup(dev); i915_ggtt_cleanup_hw(dev);
return ret; return ret;
} }
...@@ -1266,14 +1270,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ...@@ -1266,14 +1270,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
{ {
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (dev->pdev->msi_enabled) if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev); pci_disable_msi(dev->pdev);
pm_qos_remove_request(&dev_priv->pm_qos); pm_qos_remove_request(&dev_priv->pm_qos);
arch_phys_wc_del(dev_priv->ggtt.mtrr); arch_phys_wc_del(ggtt->mtrr);
io_mapping_free(dev_priv->ggtt.mappable); io_mapping_free(ggtt->mappable);
i915_global_gtt_cleanup(dev); i915_ggtt_cleanup_hw(dev);
} }
/** /**
......
...@@ -360,14 +360,12 @@ static const struct intel_device_info intel_broxton_info = { ...@@ -360,14 +360,12 @@ static const struct intel_device_info intel_broxton_info = {
static const struct intel_device_info intel_kabylake_info = { static const struct intel_device_info intel_kabylake_info = {
BDW_FEATURES, BDW_FEATURES,
.is_preliminary = 1,
.is_kabylake = 1, .is_kabylake = 1,
.gen = 9, .gen = 9,
}; };
static const struct intel_device_info intel_kabylake_gt3_info = { static const struct intel_device_info intel_kabylake_gt3_info = {
BDW_FEATURES, BDW_FEATURES,
.is_preliminary = 1,
.is_kabylake = 1, .is_kabylake = 1,
.gen = 9, .gen = 9,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
...@@ -1402,7 +1400,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv) ...@@ -1402,7 +1400,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
if (err) if (err)
goto err2; goto err2;
if (!IS_CHERRYVIEW(dev_priv->dev)) if (!IS_CHERRYVIEW(dev_priv))
vlv_save_gunit_s0ix_state(dev_priv); vlv_save_gunit_s0ix_state(dev_priv);
err = vlv_force_gfx_clock(dev_priv, false); err = vlv_force_gfx_clock(dev_priv, false);
...@@ -1434,7 +1432,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv, ...@@ -1434,7 +1432,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
*/ */
ret = vlv_force_gfx_clock(dev_priv, true); ret = vlv_force_gfx_clock(dev_priv, true);
if (!IS_CHERRYVIEW(dev_priv->dev)) if (!IS_CHERRYVIEW(dev_priv))
vlv_restore_gunit_s0ix_state(dev_priv); vlv_restore_gunit_s0ix_state(dev_priv);
err = vlv_allow_gt_wake(dev_priv, true); err = vlv_allow_gt_wake(dev_priv, true);
......
...@@ -60,7 +60,7 @@ ...@@ -60,7 +60,7 @@
#define DRIVER_NAME "i915" #define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics" #define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20160330" #define DRIVER_DATE "20160411"
#undef WARN_ON #undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */ /* Many gcc seem to no see through this and fall over :( */
...@@ -495,6 +495,7 @@ struct drm_i915_error_state { ...@@ -495,6 +495,7 @@ struct drm_i915_error_state {
u32 cpu_ring_head; u32 cpu_ring_head;
u32 cpu_ring_tail; u32 cpu_ring_tail;
u32 last_seqno;
u32 semaphore_seqno[I915_NUM_ENGINES - 1]; u32 semaphore_seqno[I915_NUM_ENGINES - 1];
/* Register state */ /* Register state */
...@@ -612,8 +613,8 @@ struct drm_i915_display_funcs { ...@@ -612,8 +613,8 @@ struct drm_i915_display_funcs {
/* display clock increase/decrease */ /* display clock increase/decrease */
/* pll clock increase/decrease */ /* pll clock increase/decrease */
void (*load_csc_matrix)(struct drm_crtc *crtc); void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
void (*load_luts)(struct drm_crtc *crtc); void (*load_luts)(struct drm_crtc_state *crtc_state);
}; };
enum forcewake_domain_id { enum forcewake_domain_id {
...@@ -1118,6 +1119,7 @@ struct intel_gen6_power_mgmt { ...@@ -1118,6 +1119,7 @@ struct intel_gen6_power_mgmt {
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/freqency */ u8 rp1_freq; /* "less than" RP0 power/freqency */
u8 rp0_freq; /* Non-overclocked max frequency. */ u8 rp0_freq; /* Non-overclocked max frequency. */
u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
u8 up_threshold; /* Current %busy required to uplock */ u8 up_threshold; /* Current %busy required to uplock */
u8 down_threshold; /* Current %busy required to downclock */ u8 down_threshold; /* Current %busy required to downclock */
...@@ -1257,6 +1259,7 @@ struct i915_gem_mm { ...@@ -1257,6 +1259,7 @@ struct i915_gem_mm {
struct i915_hw_ppgtt *aliasing_ppgtt; struct i915_hw_ppgtt *aliasing_ppgtt;
struct notifier_block oom_notifier; struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker; struct shrinker shrinker;
bool shrinker_no_lock_stealing; bool shrinker_no_lock_stealing;
...@@ -1837,6 +1840,13 @@ struct drm_i915_private { ...@@ -1837,6 +1840,13 @@ struct drm_i915_private {
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
const struct intel_dpll_mgr *dpll_mgr; const struct intel_dpll_mgr *dpll_mgr;
/*
* dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
* Must be global rather than per dpll, because on some platforms
* plls share registers.
*/
struct mutex dpll_lock;
unsigned int active_crtcs; unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES]; unsigned int min_pixclk[I915_MAX_PIPES];
...@@ -1893,7 +1903,14 @@ struct drm_i915_private { ...@@ -1893,7 +1903,14 @@ struct drm_i915_private {
u32 fdi_rx_config; u32 fdi_rx_config;
/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
u32 chv_phy_control; u32 chv_phy_control;
/*
* Shadows for CHV DPLL_MD regs to keep the state
* checker somewhat working in the presence hardware
* crappiness (can't read out DPLL_MD for pipes B & C).
*/
u32 chv_dpll_md[I915_MAX_PIPES];
u32 suspend_count; u32 suspend_count;
bool suspended_to_idle; bool suspended_to_idle;
...@@ -2152,10 +2169,7 @@ struct drm_i915_gem_object { ...@@ -2152,10 +2169,7 @@ struct drm_i915_gem_object {
struct scatterlist *sg; struct scatterlist *sg;
int last; int last;
} get_page; } get_page;
void *mapping;
/* prime dma-buf support */
void *dma_buf_vmapping;
int vmapping_count;
/** Breadcrumb of last rendering to the buffer. /** Breadcrumb of last rendering to the buffer.
* There can only be one writer, but we allow for multiple readers. * There can only be one writer, but we allow for multiple readers.
...@@ -2732,6 +2746,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, ...@@ -2732,6 +2746,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask); extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_device *dev); extern bool intel_has_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev); extern int i915_reset(struct drm_device *dev);
extern int intel_guc_reset(struct drm_i915_private *dev_priv);
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
...@@ -2970,12 +2985,44 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) ...@@ -2970,12 +2985,44 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
BUG_ON(obj->pages == NULL); BUG_ON(obj->pages == NULL);
obj->pages_pin_count++; obj->pages_pin_count++;
} }
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{ {
BUG_ON(obj->pages_pin_count == 0); BUG_ON(obj->pages_pin_count == 0);
obj->pages_pin_count--; obj->pages_pin_count--;
} }
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
* @obj - the object to map into kernel address space
*
* Calls i915_gem_object_pin_pages() to prevent reaping of the object's
* pages and then returns a contiguous mapping of the backing storage into
* the kernel address space.
*
* The caller must hold the struct_mutex.
*
* Returns the pointer through which to access the backing storage.
*/
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
/**
* i915_gem_object_unpin_map - releases an earlier mapping
* @obj - the object to unmap
*
* After pinning the object and mapping its pages, once you are finished
* with your access, call i915_gem_object_unpin_map() to release the pin
* upon the mapping. Once the pin count reaches zero, that mapping may be
* removed.
*
* The caller must hold the struct_mutex.
*/
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
lockdep_assert_held(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
}
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj, int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to, struct intel_engine_cs *to,
...@@ -2999,15 +3046,19 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) ...@@ -2999,15 +3046,19 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
bool lazy_coherency) bool lazy_coherency)
{ {
u32 seqno = req->engine->get_seqno(req->engine, lazy_coherency); if (!lazy_coherency && req->engine->irq_seqno_barrier)
return i915_seqno_passed(seqno, req->previous_seqno); req->engine->irq_seqno_barrier(req->engine);
return i915_seqno_passed(req->engine->get_seqno(req->engine),
req->previous_seqno);
} }
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
bool lazy_coherency) bool lazy_coherency)
{ {
u32 seqno = req->engine->get_seqno(req->engine, lazy_coherency); if (!lazy_coherency && req->engine->irq_seqno_barrier)
return i915_seqno_passed(seqno, req->seqno); req->engine->irq_seqno_barrier(req->engine);
return i915_seqno_passed(req->engine->get_seqno(req->engine),
req->seqno);
} }
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
...@@ -3147,13 +3198,9 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) ...@@ -3147,13 +3198,9 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj); bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */ /* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
static inline struct i915_hw_ppgtt * static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm) i915_vm_to_ppgtt(struct i915_address_space *vm)
{ {
WARN_ON(i915_is_ggtt(vm));
return container_of(vm, struct i915_hw_ppgtt, base); return container_of(vm, struct i915_hw_ppgtt, base);
} }
...@@ -3166,7 +3213,10 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) ...@@ -3166,7 +3213,10 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
static inline unsigned long static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{ {
return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj)); struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
return i915_gem_obj_size(obj, &ggtt->base);
} }
static inline int __must_check static inline int __must_check
...@@ -3174,7 +3224,10 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, ...@@ -3174,7 +3224,10 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment, uint32_t alignment,
unsigned flags) unsigned flags)
{ {
return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj), struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
return i915_gem_object_pin(obj, &ggtt->base,
alignment, flags | PIN_GLOBAL); alignment, flags | PIN_GLOBAL);
} }
...@@ -3289,6 +3342,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -3289,6 +3342,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
#define I915_SHRINK_UNBOUND 0x2 #define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4 #define I915_SHRINK_BOUND 0x4
#define I915_SHRINK_ACTIVE 0x8 #define I915_SHRINK_ACTIVE 0x8
#define I915_SHRINK_VMAPS 0x10
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv); void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
...@@ -3388,6 +3442,8 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); ...@@ -3388,6 +3442,8 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port);
/* intel_opregion.c */ /* intel_opregion.c */
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
......
...@@ -130,9 +130,9 @@ int ...@@ -130,9 +130,9 @@ int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file) struct drm_file *file)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_get_aperture *args = data;
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_get_aperture *args = data;
struct i915_vma *vma; struct i915_vma *vma;
size_t pinned; size_t pinned;
...@@ -146,7 +146,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, ...@@ -146,7 +146,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned += vma->node.size; pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
args->aper_size = dev_priv->ggtt.base.total; args->aper_size = ggtt->base.total;
args->aper_available_size = args->aper_size - pinned; args->aper_available_size = args->aper_size - pinned;
return 0; return 0;
...@@ -765,7 +765,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, ...@@ -765,7 +765,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
struct drm_i915_gem_pwrite *args, struct drm_i915_gem_pwrite *args,
struct drm_file *file) struct drm_file *file)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
ssize_t remain; ssize_t remain;
loff_t offset, page_base; loff_t offset, page_base;
char __user *user_data; char __user *user_data;
...@@ -807,7 +808,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, ...@@ -807,7 +808,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
* source page isn't available. Return the error and we'll * source page isn't available. Return the error and we'll
* retry in the slow path. * retry in the slow path.
*/ */
if (fast_user_write(dev_priv->ggtt.mappable, page_base, if (fast_user_write(ggtt->mappable, page_base,
page_offset, user_data, page_length)) { page_offset, user_data, page_length)) {
ret = -EFAULT; ret = -EFAULT;
goto out_flush; goto out_flush;
...@@ -1790,7 +1791,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1790,7 +1791,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{ {
struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_ggtt_view view = i915_ggtt_view_normal; struct i915_ggtt_view view = i915_ggtt_view_normal;
pgoff_t page_offset; pgoff_t page_offset;
unsigned long pfn; unsigned long pfn;
...@@ -1825,7 +1827,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1825,7 +1827,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
} }
/* Use a partial view if the object is bigger than the aperture. */ /* Use a partial view if the object is bigger than the aperture. */
if (obj->base.size >= dev_priv->ggtt.mappable_end && if (obj->base.size >= ggtt->mappable_end &&
obj->tiling_mode == I915_TILING_NONE) { obj->tiling_mode == I915_TILING_NONE) {
static const unsigned int chunk_size = 256; // 1 MiB static const unsigned int chunk_size = 256; // 1 MiB
...@@ -1853,7 +1855,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1853,7 +1855,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unpin; goto unpin;
/* Finally, remap it using the new GTT offset */ /* Finally, remap it using the new GTT offset */
pfn = dev_priv->ggtt.mappable_base + pfn = ggtt->mappable_base +
i915_gem_obj_ggtt_offset_view(obj, &view); i915_gem_obj_ggtt_offset_view(obj, &view);
pfn >>= PAGE_SHIFT; pfn >>= PAGE_SHIFT;
...@@ -2227,6 +2229,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) ...@@ -2227,6 +2229,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
* lists early. */ * lists early. */
list_del(&obj->global_list); list_del(&obj->global_list);
if (obj->mapping) {
if (is_vmalloc_addr(obj->mapping))
vunmap(obj->mapping);
else
kunmap(kmap_to_page(obj->mapping));
obj->mapping = NULL;
}
ops->put_pages(obj); ops->put_pages(obj);
obj->pages = NULL; obj->pages = NULL;
...@@ -2395,6 +2405,49 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj) ...@@ -2395,6 +2405,49 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return 0; return 0;
} }
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
{
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
ret = i915_gem_object_get_pages(obj);
if (ret)
return ERR_PTR(ret);
i915_gem_object_pin_pages(obj);
if (obj->mapping == NULL) {
struct page **pages;
pages = NULL;
if (obj->base.size == PAGE_SIZE)
obj->mapping = kmap(sg_page(obj->pages->sgl));
else
pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
sizeof(*pages),
GFP_TEMPORARY);
if (pages != NULL) {
struct sg_page_iter sg_iter;
int n;
n = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter,
obj->pages->nents, 0)
pages[n++] = sg_page_iter_page(&sg_iter);
obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
drm_free_large(pages);
}
if (obj->mapping == NULL) {
i915_gem_object_unpin_pages(obj);
return ERR_PTR(-ENOMEM);
}
}
return obj->mapping;
}
void i915_vma_move_to_active(struct i915_vma *vma, void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_request *req) struct drm_i915_gem_request *req)
{ {
...@@ -2463,7 +2516,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno) ...@@ -2463,7 +2516,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
int ret, j; int ret;
/* Carefully retire all requests without writing to the rings */ /* Carefully retire all requests without writing to the rings */
for_each_engine(engine, dev_priv) { for_each_engine(engine, dev_priv) {
...@@ -2474,13 +2527,9 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno) ...@@ -2474,13 +2527,9 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
i915_gem_retire_requests(dev); i915_gem_retire_requests(dev);
/* Finally reset hw state */ /* Finally reset hw state */
for_each_engine(engine, dev_priv) { for_each_engine(engine, dev_priv)
intel_ring_init_seqno(engine, seqno); intel_ring_init_seqno(engine, seqno);
for (j = 0; j < ARRAY_SIZE(engine->semaphore.sync_seqno); j++)
engine->semaphore.sync_seqno[j] = 0;
}
return 0; return 0;
} }
...@@ -2574,6 +2623,28 @@ void __i915_add_request(struct drm_i915_gem_request *request, ...@@ -2574,6 +2623,28 @@ void __i915_add_request(struct drm_i915_gem_request *request,
WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret); WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
} }
trace_i915_gem_request_add(request);
request->head = request_start;
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
* request is retired will the the batch_obj be moved onto the
* inactive_list and lose its active reference. Hence we do not need
* to explicitly hold another reference here.
*/
request->batch_obj = obj;
/* Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
* hangcheck. Hence we apply the barrier to ensure that we do not
* see a more recent value in the hws than we are tracking.
*/
request->emitted_jiffies = jiffies;
request->previous_seqno = engine->last_submitted_seqno;
smp_store_mb(engine->last_submitted_seqno, request->seqno);
list_add_tail(&request->list, &engine->request_list);
/* Record the position of the start of the request so that /* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the * should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the * GPU processing the request, we never over-estimate the
...@@ -2591,23 +2662,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, ...@@ -2591,23 +2662,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
/* Not allowed to fail! */ /* Not allowed to fail! */
WARN(ret, "emit|add_request failed: %d!\n", ret); WARN(ret, "emit|add_request failed: %d!\n", ret);
request->head = request_start;
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
* request is retired will the the batch_obj be moved onto the
* inactive_list and lose its active reference. Hence we do not need
* to explicitly hold another reference here.
*/
request->batch_obj = obj;
request->emitted_jiffies = jiffies;
request->previous_seqno = engine->last_submitted_seqno;
engine->last_submitted_seqno = request->seqno;
list_add_tail(&request->list, &engine->request_list);
trace_i915_gem_request_add(request);
i915_queue_hangcheck(engine->dev); i915_queue_hangcheck(engine->dev);
queue_delayed_work(dev_priv->wq, queue_delayed_work(dev_priv->wq,
...@@ -2837,13 +2891,15 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv, ...@@ -2837,13 +2891,15 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
*/ */
if (i915.enable_execlists) { if (i915.enable_execlists) {
spin_lock_irq(&engine->execlist_lock); /* Ensure irq handler finishes or is cancelled. */
tasklet_kill(&engine->irq_tasklet);
spin_lock_bh(&engine->execlist_lock);
/* list_splice_tail_init checks for empty lists */ /* list_splice_tail_init checks for empty lists */
list_splice_tail_init(&engine->execlist_queue, list_splice_tail_init(&engine->execlist_queue,
&engine->execlist_retired_req_list); &engine->execlist_retired_req_list);
spin_unlock_bh(&engine->execlist_lock);
spin_unlock_irq(&engine->execlist_lock);
intel_execlists_retire_requests(engine); intel_execlists_retire_requests(engine);
} }
...@@ -2875,6 +2931,8 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv, ...@@ -2875,6 +2931,8 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
buffer->last_retired_head = buffer->tail; buffer->last_retired_head = buffer->tail;
intel_ring_update_space(buffer); intel_ring_update_space(buffer);
} }
intel_ring_init_seqno(engine, engine->last_submitted_seqno);
} }
void i915_gem_reset(struct drm_device *dev) void i915_gem_reset(struct drm_device *dev)
...@@ -2963,9 +3021,9 @@ i915_gem_retire_requests(struct drm_device *dev) ...@@ -2963,9 +3021,9 @@ i915_gem_retire_requests(struct drm_device *dev)
i915_gem_retire_requests_ring(engine); i915_gem_retire_requests_ring(engine);
idle &= list_empty(&engine->request_list); idle &= list_empty(&engine->request_list);
if (i915.enable_execlists) { if (i915.enable_execlists) {
spin_lock_irq(&engine->execlist_lock); spin_lock_bh(&engine->execlist_lock);
idle &= list_empty(&engine->execlist_queue); idle &= list_empty(&engine->execlist_queue);
spin_unlock_irq(&engine->execlist_lock); spin_unlock_bh(&engine->execlist_lock);
intel_execlists_retire_requests(engine); intel_execlists_retire_requests(engine);
} }
...@@ -3455,7 +3513,8 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, ...@@ -3455,7 +3513,8 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
uint64_t flags) uint64_t flags)
{ {
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
u32 fence_alignment, unfenced_alignment; u32 fence_alignment, unfenced_alignment;
u32 search_flag, alloc_flag; u32 search_flag, alloc_flag;
u64 start, end; u64 start, end;
...@@ -3502,7 +3561,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, ...@@ -3502,7 +3561,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
end = vm->total; end = vm->total;
if (flags & PIN_MAPPABLE) if (flags & PIN_MAPPABLE)
end = min_t(u64, end, dev_priv->ggtt.mappable_end); end = min_t(u64, end, ggtt->mappable_end);
if (flags & PIN_ZONE_4G) if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE); end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
...@@ -3709,6 +3768,9 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) ...@@ -3709,6 +3768,9 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
int int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{ {
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t old_write_domain, old_read_domains; uint32_t old_write_domain, old_read_domains;
struct i915_vma *vma; struct i915_vma *vma;
int ret; int ret;
...@@ -3763,7 +3825,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) ...@@ -3763,7 +3825,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
vma = i915_gem_obj_to_ggtt(obj); vma = i915_gem_obj_to_ggtt(obj);
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active) if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
list_move_tail(&vma->vm_link, list_move_tail(&vma->vm_link,
&to_i915(obj->base.dev)->ggtt.base.inactive_list); &ggtt->base.inactive_list);
return 0; return 0;
} }
...@@ -4232,9 +4294,6 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj, ...@@ -4232,9 +4294,6 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) : vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
i915_gem_obj_to_vma(obj, vm); i915_gem_obj_to_vma(obj, vm);
if (IS_ERR(vma))
return PTR_ERR(vma);
if (vma) { if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY; return -EBUSY;
...@@ -4297,10 +4356,13 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, ...@@ -4297,10 +4356,13 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment, uint32_t alignment,
uint64_t flags) uint64_t flags)
{ {
if (WARN_ONCE(!view, "no view specified")) struct drm_device *dev = obj->base.dev;
return -EINVAL; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
BUG_ON(!view);
return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view, return i915_gem_object_do_pin(obj, &ggtt->base, view,
alignment, flags | PIN_GLOBAL); alignment, flags | PIN_GLOBAL);
} }
...@@ -4612,14 +4674,15 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, ...@@ -4612,14 +4674,15 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view) const struct i915_ggtt_view *view)
{ {
struct i915_address_space *ggtt = i915_obj_to_ggtt(obj); struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma; struct i915_vma *vma;
if (WARN_ONCE(!view, "no view specified")) BUG_ON(!view);
return ERR_PTR(-EINVAL);
list_for_each_entry(vma, &obj->vma_list, obj_link) list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->vm == ggtt && if (vma->vm == &ggtt->base &&
i915_ggtt_view_equal(&vma->ggtt_view, view)) i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma; return vma;
return NULL; return NULL;
...@@ -4964,7 +5027,7 @@ int i915_gem_init(struct drm_device *dev) ...@@ -4964,7 +5027,7 @@ int i915_gem_init(struct drm_device *dev)
if (ret) if (ret)
goto out_unlock; goto out_unlock;
i915_gem_init_global_gtt(dev); i915_gem_init_ggtt(dev);
ret = i915_gem_context_init(dev); ret = i915_gem_context_init(dev);
if (ret) if (ret)
...@@ -5212,11 +5275,12 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, ...@@ -5212,11 +5275,12 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view) const struct i915_ggtt_view *view)
{ {
struct i915_address_space *ggtt = i915_obj_to_ggtt(o); struct drm_i915_private *dev_priv = to_i915(o->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma; struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link) list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->vm == ggtt && if (vma->vm == &ggtt->base &&
i915_ggtt_view_equal(&vma->ggtt_view, view)) i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start; return vma->node.start;
...@@ -5243,11 +5307,12 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o, ...@@ -5243,11 +5307,12 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view) const struct i915_ggtt_view *view)
{ {
struct i915_address_space *ggtt = i915_obj_to_ggtt(o); struct drm_i915_private *dev_priv = to_i915(o->base.dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma; struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link) list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->vm == ggtt && if (vma->vm == &ggtt->base &&
i915_ggtt_view_equal(&vma->ggtt_view, view) && i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node)) drm_mm_node_allocated(&vma->node))
return true; return true;
......
...@@ -95,14 +95,12 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, ...@@ -95,14 +95,12 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
{ {
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
mutex_lock(&obj->base.dev->struct_mutex);
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg); sg_free_table(sg);
kfree(sg); kfree(sg);
mutex_lock(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj); i915_gem_object_unpin_pages(obj);
mutex_unlock(&obj->base.dev->struct_mutex); mutex_unlock(&obj->base.dev->struct_mutex);
} }
...@@ -110,51 +108,17 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) ...@@ -110,51 +108,17 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{ {
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
struct sg_page_iter sg_iter; void *addr;
struct page **pages; int ret;
int ret, i;
ret = i915_mutex_lock_interruptible(dev); ret = i915_mutex_lock_interruptible(dev);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
if (obj->dma_buf_vmapping) { addr = i915_gem_object_pin_map(obj);
obj->vmapping_count++;
goto out_unlock;
}
ret = i915_gem_object_get_pages(obj);
if (ret)
goto err;
i915_gem_object_pin_pages(obj);
ret = -ENOMEM;
pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
if (pages == NULL)
goto err_unpin;
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
pages[i++] = sg_page_iter_page(&sg_iter);
obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
drm_free_large(pages);
if (!obj->dma_buf_vmapping)
goto err_unpin;
obj->vmapping_count = 1;
out_unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return obj->dma_buf_vmapping;
err_unpin: return addr;
i915_gem_object_unpin_pages(obj);
err:
mutex_unlock(&dev->struct_mutex);
return ERR_PTR(ret);
} }
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
...@@ -163,12 +127,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) ...@@ -163,12 +127,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
if (--obj->vmapping_count == 0) { i915_gem_object_unpin_map(obj);
vunmap(obj->dma_buf_vmapping);
obj->dma_buf_vmapping = NULL;
i915_gem_object_unpin_pages(obj);
}
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
} }
......
...@@ -313,7 +313,8 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, ...@@ -313,7 +313,8 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
uint64_t target_offset) uint64_t target_offset)
{ {
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint64_t delta = relocation_target(reloc, target_offset); uint64_t delta = relocation_target(reloc, target_offset);
uint64_t offset; uint64_t offset;
void __iomem *reloc_page; void __iomem *reloc_page;
...@@ -330,7 +331,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, ...@@ -330,7 +331,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
/* Map the page containing the relocation we're going to perform. */ /* Map the page containing the relocation we're going to perform. */
offset = i915_gem_obj_ggtt_offset(obj); offset = i915_gem_obj_ggtt_offset(obj);
offset += reloc->offset; offset += reloc->offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable, reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
offset & PAGE_MASK); offset & PAGE_MASK);
iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset)); iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
...@@ -340,7 +341,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, ...@@ -340,7 +341,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
if (offset_in_page(offset) == 0) { if (offset_in_page(offset) == 0) {
io_mapping_unmap_atomic(reloc_page); io_mapping_unmap_atomic(reloc_page);
reloc_page = reloc_page =
io_mapping_map_atomic_wc(dev_priv->ggtt.mappable, io_mapping_map_atomic_wc(ggtt->mappable,
offset); offset);
} }
...@@ -1431,7 +1432,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -1431,7 +1432,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_execbuffer2 *args, struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec) struct drm_i915_gem_exec_object2 *exec)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_request *req = NULL; struct drm_i915_gem_request *req = NULL;
struct eb_vmas *eb; struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj; struct drm_i915_gem_object *batch_obj;
...@@ -1504,7 +1506,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -1504,7 +1506,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ctx->ppgtt) if (ctx->ppgtt)
vm = &ctx->ppgtt->base; vm = &ctx->ppgtt->base;
else else
vm = &dev_priv->ggtt.base; vm = &ggtt->base;
memset(&params_master, 0x00, sizeof(params_master)); memset(&params_master, 0x00, sizeof(params_master));
...@@ -1781,11 +1783,9 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, ...@@ -1781,11 +1783,9 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EINVAL; return -EINVAL;
} }
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, exec2_list = drm_malloc_gfp(args->buffer_count,
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); sizeof(*exec2_list),
if (exec2_list == NULL) GFP_TEMPORARY);
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);
if (exec2_list == NULL) { if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n", DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count); args->buffer_count);
......
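Both execbuffer callers above (and the userptr ones later) replace the open-coded kmalloc-then-drm_malloc_ab fallback with a single drm_malloc_gfp() call. A sketch of such a helper, assuming the semantics implied by the code it replaces (overflow-checked array sizing, an opportunistic kmalloc for reclaimable requests, vmalloc as fallback):

static inline void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
{
	/* Refuse allocations whose byte count would overflow. */
	if (size != 0 && nmemb > SIZE_MAX / size)
		return NULL;

	if (size * nmemb <= PAGE_SIZE)
		return kmalloc(nmemb * size, gfp);

	/* Try physically contiguous memory first, but neither retry
	 * nor warn: oversized arrays simply fall back to vmalloc. */
	if (gfp & __GFP_RECLAIMABLE) {
		void *ptr = kmalloc(nmemb * size,
				    gfp | __GFP_NOWARN | __GFP_NORETRY);
		if (ptr)
			return ptr;
	}

	return __vmalloc(size * nmemb, gfp | __GFP_HIGHMEM, PAGE_KERNEL);
}

GFP_TEMPORARY includes __GFP_RECLAIMABLE, so the callers shown here keep the old try-kmalloc-first behaviour.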
(This diff is collapsed.)
...@@ -42,7 +42,7 @@ typedef uint64_t gen8_pde_t; ...@@ -42,7 +42,7 @@ typedef uint64_t gen8_pde_t;
typedef uint64_t gen8_ppgtt_pdpe_t; typedef uint64_t gen8_ppgtt_pdpe_t;
typedef uint64_t gen8_ppgtt_pml4e_t; typedef uint64_t gen8_ppgtt_pml4e_t;
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) #define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */ /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
...@@ -513,10 +513,9 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n) ...@@ -513,10 +513,9 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
px_dma(ppgtt->base.scratch_pd); px_dma(ppgtt->base.scratch_pd);
} }
int i915_gem_gtt_init(struct drm_device *dev); int i915_ggtt_init_hw(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev); void i915_gem_init_ggtt(struct drm_device *dev);
void i915_global_gtt_cleanup(struct drm_device *dev); void i915_ggtt_cleanup_hw(struct drm_device *dev);
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt); int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev); int i915_ppgtt_init_hw(struct drm_device *dev);
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <linux/swap.h> #include <linux/swap.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/dma-buf.h> #include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include <drm/i915_drm.h> #include <drm/i915_drm.h>
...@@ -166,6 +167,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, ...@@ -166,6 +167,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
obj->madv != I915_MADV_DONTNEED) obj->madv != I915_MADV_DONTNEED)
continue; continue;
if (flags & I915_SHRINK_VMAPS &&
!is_vmalloc_addr(obj->mapping))
continue;
if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active) if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
continue; continue;
...@@ -246,7 +251,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) ...@@ -246,7 +251,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
count = 0; count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0) if (can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT; count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
...@@ -288,35 +293,56 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) ...@@ -288,35 +293,56 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
return freed; return freed;
} }
struct shrinker_lock_uninterruptible {
bool was_interruptible;
bool unlock;
};
static bool
i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
struct shrinker_lock_uninterruptible *slu,
int timeout_ms)
{
unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
schedule_timeout_killable(1);
if (fatal_signal_pending(current))
return false;
if (--timeout == 0) {
pr_err("Unable to lock GPU to purge memory.\n");
return false;
}
}
slu->was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
return true;
}
static void
i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
struct shrinker_lock_uninterruptible *slu)
{
dev_priv->mm.interruptible = slu->was_interruptible;
if (slu->unlock)
mutex_unlock(&dev_priv->dev->struct_mutex);
}
static int static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{ {
struct drm_i915_private *dev_priv = struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, mm.oom_notifier); container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct drm_device *dev = dev_priv->dev; struct shrinker_lock_uninterruptible slu;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
unsigned long timeout = msecs_to_jiffies(5000) + 1;
unsigned long pinned, bound, unbound, freed_pages; unsigned long pinned, bound, unbound, freed_pages;
bool was_interruptible;
bool unlock;
while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) { if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
schedule_timeout_killable(1);
if (fatal_signal_pending(current))
return NOTIFY_DONE;
}
if (timeout == 0) {
pr_err("Unable to purge GPU memory due lock contention.\n");
return NOTIFY_DONE; return NOTIFY_DONE;
}
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
freed_pages = i915_gem_shrink_all(dev_priv); freed_pages = i915_gem_shrink_all(dev_priv);
dev_priv->mm.interruptible = was_interruptible;
/* Because we may be allocating inside our own driver, we cannot /* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not * assert that there are no objects with pinned pages that are not
* being pointed to by hardware. * being pointed to by hardware.
...@@ -341,8 +367,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) ...@@ -341,8 +367,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
bound += obj->base.size; bound += obj->base.size;
} }
if (unlock) i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
mutex_unlock(&dev->struct_mutex);
if (freed_pages || unbound || bound) if (freed_pages || unbound || bound)
pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n", pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
...@@ -356,6 +381,29 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) ...@@ -356,6 +381,29 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
return NOTIFY_DONE; return NOTIFY_DONE;
} }
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, mm.vmap_notifier);
struct shrinker_lock_uninterruptible slu;
unsigned long freed_pages;
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE;
freed_pages = i915_gem_shrink(dev_priv, -1UL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE |
I915_SHRINK_VMAPS);
i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
}
/** /**
* i915_gem_shrinker_init - Initialize i915 shrinker * i915_gem_shrinker_init - Initialize i915 shrinker
* @dev_priv: i915 device * @dev_priv: i915 device
...@@ -371,6 +419,9 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv) ...@@ -371,6 +419,9 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier)); WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
} }
/** /**
...@@ -381,6 +432,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv) ...@@ -381,6 +432,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
*/ */
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv) void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{ {
WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
unregister_shrinker(&dev_priv->mm.shrinker); unregister_shrinker(&dev_priv->mm.shrinker);
} }
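Besides the OOM notifier, the shrinker now also hooks the vmalloc purge notifier chain: when the kernel runs out of vmap address space, every registered callback is invoked and reports the number of pages it freed through the pointer argument (hence the *(unsigned long *)ptr update in i915_gem_shrinker_vmap above). A minimal client of this interface might look like the following; my_drop_vmaps() is a hypothetical helper standing in for driver-specific cleanup:

#include <linux/notifier.h>
#include <linux/vmalloc.h>

static unsigned long my_drop_vmaps(void)
{
	/* Hypothetical: release cached kernel vmaps, return pages freed. */
	return 0;
}

static int my_vmap_notify(struct notifier_block *nb,
			  unsigned long event, void *ptr)
{
	/* ptr accumulates pages freed across the whole chain. */
	*(unsigned long *)ptr += my_drop_vmaps();
	return NOTIFY_DONE;
}

static struct notifier_block my_vmap_nb = {
	.notifier_call = my_vmap_notify,
};

static int my_init(void)
{
	return register_vmap_purge_notifier(&my_vmap_nb);
}

static void my_exit(void)
{
	unregister_vmap_purge_notifier(&my_vmap_nb);
}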
...@@ -72,9 +72,11 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, ...@@ -72,9 +72,11 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size, struct drm_mm_node *node, u64 size,
unsigned alignment) unsigned alignment)
{ {
struct i915_ggtt *ggtt = &dev_priv->ggtt;
return i915_gem_stolen_insert_node_in_range(dev_priv, node, size, return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
alignment, 0, alignment, 0,
dev_priv->ggtt.stolen_usable_size); ggtt->stolen_usable_size);
} }
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
...@@ -87,7 +89,8 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, ...@@ -87,7 +89,8 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
static unsigned long i915_stolen_to_physical(struct drm_device *dev) static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct resource *r; struct resource *r;
u32 base; u32 base;
...@@ -134,7 +137,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) ...@@ -134,7 +137,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I85X_DRB3, &tmp); I85X_DRB3, &tmp);
tom = tmp * MB(32); tom = tmp * MB(32);
base = tom - tseg_size - dev_priv->ggtt.stolen_size; base = tom - tseg_size - ggtt->stolen_size;
} else if (IS_845G(dev)) { } else if (IS_845G(dev)) {
u32 tseg_size = 0; u32 tseg_size = 0;
u32 tom; u32 tom;
...@@ -158,7 +161,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) ...@@ -158,7 +161,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I830_DRB3, &tmp); I830_DRB3, &tmp);
tom = tmp * MB(32); tom = tmp * MB(32);
base = tom - tseg_size - dev_priv->ggtt.stolen_size; base = tom - tseg_size - ggtt->stolen_size;
} else if (IS_I830(dev)) { } else if (IS_I830(dev)) {
u32 tseg_size = 0; u32 tseg_size = 0;
u32 tom; u32 tom;
...@@ -178,7 +181,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) ...@@ -178,7 +181,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I830_DRB3, &tmp); I830_DRB3, &tmp);
tom = tmp * MB(32); tom = tmp * MB(32);
base = tom - tseg_size - dev_priv->ggtt.stolen_size; base = tom - tseg_size - ggtt->stolen_size;
} }
if (base == 0) if (base == 0)
...@@ -189,41 +192,41 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) ...@@ -189,41 +192,41 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
struct { struct {
u32 start, end; u32 start, end;
} stolen[2] = { } stolen[2] = {
{ .start = base, .end = base + dev_priv->ggtt.stolen_size, }, { .start = base, .end = base + ggtt->stolen_size, },
{ .start = base, .end = base + dev_priv->ggtt.stolen_size, }, { .start = base, .end = base + ggtt->stolen_size, },
}; };
u64 gtt_start, gtt_end; u64 ggtt_start, ggtt_end;
gtt_start = I915_READ(PGTBL_CTL); ggtt_start = I915_READ(PGTBL_CTL);
if (IS_GEN4(dev)) if (IS_GEN4(dev))
gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) | ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28; (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else else
gtt_start &= PGTBL_ADDRESS_LO_MASK; ggtt_start &= PGTBL_ADDRESS_LO_MASK;
gtt_end = gtt_start + gtt_total_entries(dev_priv->ggtt) * 4; ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;
if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end) if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
stolen[0].end = gtt_start; stolen[0].end = ggtt_start;
if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end) if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
stolen[1].start = gtt_end; stolen[1].start = ggtt_end;
/* pick the larger of the two chunks */ /* pick the larger of the two chunks */
if (stolen[0].end - stolen[0].start > if (stolen[0].end - stolen[0].start >
stolen[1].end - stolen[1].start) { stolen[1].end - stolen[1].start) {
base = stolen[0].start; base = stolen[0].start;
dev_priv->ggtt.stolen_size = stolen[0].end - stolen[0].start; ggtt->stolen_size = stolen[0].end - stolen[0].start;
} else { } else {
base = stolen[1].start; base = stolen[1].start;
dev_priv->ggtt.stolen_size = stolen[1].end - stolen[1].start; ggtt->stolen_size = stolen[1].end - stolen[1].start;
} }
if (stolen[0].start != stolen[1].start || if (stolen[0].start != stolen[1].start ||
stolen[0].end != stolen[1].end) { stolen[0].end != stolen[1].end) {
DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n", DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
(unsigned long long) gtt_start, (unsigned long long)ggtt_start,
(unsigned long long) gtt_end - 1); (unsigned long long)ggtt_end - 1);
DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n", DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
base, base + (u32) dev_priv->ggtt.stolen_size - 1); base, base + (u32)ggtt->stolen_size - 1);
} }
} }
...@@ -233,7 +236,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) ...@@ -233,7 +236,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* kernel. So if the region is already marked as busy, something * kernel. So if the region is already marked as busy, something
* is seriously wrong. * is seriously wrong.
*/ */
r = devm_request_mem_region(dev->dev, base, dev_priv->ggtt.stolen_size, r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
"Graphics Stolen Memory"); "Graphics Stolen Memory");
if (r == NULL) { if (r == NULL) {
/* /*
...@@ -245,7 +248,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) ...@@ -245,7 +248,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* reservation starting from 1 instead of 0. * reservation starting from 1 instead of 0.
*/ */
r = devm_request_mem_region(dev->dev, base + 1, r = devm_request_mem_region(dev->dev, base + 1,
dev_priv->ggtt.stolen_size - 1, ggtt->stolen_size - 1,
"Graphics Stolen Memory"); "Graphics Stolen Memory");
/* /*
* GEN3 firmware likes to smash pci bridges into the stolen * GEN3 firmware likes to smash pci bridges into the stolen
...@@ -253,7 +256,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) ...@@ -253,7 +256,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*/ */
if (r == NULL && !IS_GEN3(dev)) { if (r == NULL && !IS_GEN3(dev)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
base, base + (uint32_t)dev_priv->ggtt.stolen_size); base, base + (uint32_t)ggtt->stolen_size);
base = 0; base = 0;
} }
} }
...@@ -274,11 +277,12 @@ void i915_gem_cleanup_stolen(struct drm_device *dev) ...@@ -274,11 +277,12 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv, static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
unsigned long *base, unsigned long *size) unsigned long *base, unsigned long *size)
{ {
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ? uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
CTG_STOLEN_RESERVED : CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED); ELK_STOLEN_RESERVED);
unsigned long stolen_top = dev_priv->mm.stolen_base + unsigned long stolen_top = dev_priv->mm.stolen_base +
dev_priv->ggtt.stolen_size; ggtt->stolen_size;
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
...@@ -369,10 +373,11 @@ static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv, ...@@ -369,10 +373,11 @@ static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
unsigned long *base, unsigned long *size) unsigned long *base, unsigned long *size)
{ {
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
unsigned long stolen_top; unsigned long stolen_top;
stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size; stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
...@@ -388,7 +393,8 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, ...@@ -388,7 +393,8 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_device *dev) int i915_gem_init_stolen(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long reserved_total, reserved_base = 0, reserved_size; unsigned long reserved_total, reserved_base = 0, reserved_size;
unsigned long stolen_top; unsigned long stolen_top;
...@@ -401,14 +407,14 @@ int i915_gem_init_stolen(struct drm_device *dev) ...@@ -401,14 +407,14 @@ int i915_gem_init_stolen(struct drm_device *dev)
} }
#endif #endif
if (dev_priv->ggtt.stolen_size == 0) if (ggtt->stolen_size == 0)
return 0; return 0;
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0) if (dev_priv->mm.stolen_base == 0)
return 0; return 0;
stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size; stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
switch (INTEL_INFO(dev_priv)->gen) { switch (INTEL_INFO(dev_priv)->gen) {
case 2: case 2:
...@@ -458,19 +464,18 @@ int i915_gem_init_stolen(struct drm_device *dev) ...@@ -458,19 +464,18 @@ int i915_gem_init_stolen(struct drm_device *dev)
return 0; return 0;
} }
dev_priv->ggtt.stolen_reserved_base = reserved_base; ggtt->stolen_reserved_base = reserved_base;
dev_priv->ggtt.stolen_reserved_size = reserved_size; ggtt->stolen_reserved_size = reserved_size;
/* It is possible for the reserved area to end before the end of stolen /* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */ * memory, so just consider the start. */
reserved_total = stolen_top - reserved_base; reserved_total = stolen_top - reserved_base;
DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n", DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
dev_priv->ggtt.stolen_size >> 10, ggtt->stolen_size >> 10,
(dev_priv->ggtt.stolen_size - reserved_total) >> 10); (ggtt->stolen_size - reserved_total) >> 10);
dev_priv->ggtt.stolen_usable_size = dev_priv->ggtt.stolen_size - ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;
reserved_total;
/* /*
* Basic memrange allocator for stolen space. * Basic memrange allocator for stolen space.
...@@ -483,7 +488,7 @@ int i915_gem_init_stolen(struct drm_device *dev) ...@@ -483,7 +488,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
* i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
* problem later. * problem later.
*/ */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->ggtt.stolen_usable_size); drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);
return 0; return 0;
} }
...@@ -492,12 +497,13 @@ static struct sg_table * ...@@ -492,12 +497,13 @@ static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev, i915_pages_create_for_stolen(struct drm_device *dev,
u32 offset, u32 size) u32 offset, u32 size)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct sg_table *st; struct sg_table *st;
struct scatterlist *sg; struct scatterlist *sg;
DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size); DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
BUG_ON(offset > dev_priv->ggtt.stolen_size - size); BUG_ON(offset > ggtt->stolen_size - size);
/* We hide that we have no struct page backing our stolen object /* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake * by wrapping the contiguous physical allocation with a fake
...@@ -628,8 +634,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, ...@@ -628,8 +634,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 gtt_offset, u32 gtt_offset,
u32 size) u32 size)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_address_space *ggtt = &dev_priv->ggtt.base; struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen; struct drm_mm_node *stolen;
struct i915_vma *vma; struct i915_vma *vma;
...@@ -675,7 +681,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, ...@@ -675,7 +681,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE) if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj; return obj;
vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt); vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
goto err; goto err;
...@@ -688,8 +694,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, ...@@ -688,8 +694,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
*/ */
vma->node.start = gtt_offset; vma->node.start = gtt_offset;
vma->node.size = size; vma->node.size = size;
if (drm_mm_initialized(&ggtt->mm)) { if (drm_mm_initialized(&ggtt->base.mm)) {
ret = drm_mm_reserve_node(&ggtt->mm, &vma->node); ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
if (ret) { if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto err; goto err;
...@@ -697,7 +703,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, ...@@ -697,7 +703,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
vma->bound |= GLOBAL_BIND; vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma); __i915_vma_set_map_and_fenceable(vma);
list_add_tail(&vma->vm_link, &ggtt->inactive_list); list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
} }
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
......
...@@ -494,10 +494,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) ...@@ -494,10 +494,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
ret = -ENOMEM; ret = -ENOMEM;
pinned = 0; pinned = 0;
pvec = kmalloc(npages*sizeof(struct page *), pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (pvec == NULL)
pvec = drm_malloc_ab(npages, sizeof(struct page *));
if (pvec != NULL) { if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm; struct mm_struct *mm = obj->userptr.mm->mm;
...@@ -634,14 +631,11 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) ...@@ -634,14 +631,11 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
pvec = NULL; pvec = NULL;
pinned = 0; pinned = 0;
if (obj->userptr.mm->mm == current->mm) { if (obj->userptr.mm->mm == current->mm) {
pvec = kmalloc(num_pages*sizeof(struct page *), pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); GFP_TEMPORARY);
if (pvec == NULL) { if (pvec == NULL) {
pvec = drm_malloc_ab(num_pages, sizeof(struct page *)); __i915_gem_userptr_set_active(obj, false);
if (pvec == NULL) { return -ENOMEM;
__i915_gem_userptr_set_active(obj, false);
return -ENOMEM;
}
} }
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages, pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
......
...@@ -296,6 +296,7 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m, ...@@ -296,6 +296,7 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
} }
} }
err_printf(m, " seqno: 0x%08x\n", ring->seqno); err_printf(m, " seqno: 0x%08x\n", ring->seqno);
err_printf(m, " last_seqno: 0x%08x\n", ring->last_seqno);
err_printf(m, " waiting: %s\n", yesno(ring->waiting)); err_printf(m, " waiting: %s\n", yesno(ring->waiting));
err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head); err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail); err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
...@@ -627,6 +628,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv, ...@@ -627,6 +628,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src, struct drm_i915_gem_object *src,
struct i915_address_space *vm) struct i915_address_space *vm)
{ {
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_error_object *dst; struct drm_i915_error_object *dst;
struct i915_vma *vma = NULL; struct i915_vma *vma = NULL;
int num_pages; int num_pages;
...@@ -653,7 +655,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv, ...@@ -653,7 +655,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
vma = i915_gem_obj_to_ggtt(src); vma = i915_gem_obj_to_ggtt(src);
use_ggtt = (src->cache_level == I915_CACHE_NONE && use_ggtt = (src->cache_level == I915_CACHE_NONE &&
vma && (vma->bound & GLOBAL_BIND) && vma && (vma->bound & GLOBAL_BIND) &&
reloc_offset + num_pages * PAGE_SIZE <= dev_priv->ggtt.mappable_end); reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
/* Cannot access stolen address directly, try to use the aperture */ /* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) { if (src->stolen) {
...@@ -663,12 +665,13 @@ i915_error_object_create(struct drm_i915_private *dev_priv, ...@@ -663,12 +665,13 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind; goto unwind;
reloc_offset = i915_gem_obj_ggtt_offset(src); reloc_offset = i915_gem_obj_ggtt_offset(src);
if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->ggtt.mappable_end) if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
goto unwind; goto unwind;
} }
/* Cannot access snooped pages through the aperture */ /* Cannot access snooped pages through the aperture */
if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev)) if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
!HAS_LLC(dev_priv))
goto unwind; goto unwind;
dst->page_count = num_pages; dst->page_count = num_pages;
...@@ -689,7 +692,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv, ...@@ -689,7 +692,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
* captures what the GPU read. * captures what the GPU read.
*/ */
s = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable, s = io_mapping_map_atomic_wc(ggtt->mappable,
reloc_offset); reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE); memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s); io_mapping_unmap_atomic(s);
...@@ -883,7 +886,7 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv, ...@@ -883,7 +886,7 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0]; ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1]; ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
if (HAS_VEBOX(dev_priv->dev)) { if (HAS_VEBOX(dev_priv)) {
ering->semaphore_mboxes[2] = ering->semaphore_mboxes[2] =
I915_READ(RING_SYNC_2(engine->mmio_base)); I915_READ(RING_SYNC_2(engine->mmio_base));
ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2]; ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
...@@ -928,8 +931,9 @@ static void i915_record_ring_state(struct drm_device *dev, ...@@ -928,8 +931,9 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->waiting = waitqueue_active(&engine->irq_queue); ering->waiting = waitqueue_active(&engine->irq_queue);
ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base)); ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
ering->seqno = engine->get_seqno(engine, false);
ering->acthd = intel_ring_get_active_head(engine); ering->acthd = intel_ring_get_active_head(engine);
ering->seqno = engine->get_seqno(engine);
ering->last_seqno = engine->last_submitted_seqno;
ering->start = I915_READ_START(engine); ering->start = I915_READ_START(engine);
ering->head = I915_READ_HEAD(engine); ering->head = I915_READ_HEAD(engine);
ering->tail = I915_READ_TAIL(engine); ering->tail = I915_READ_TAIL(engine);
...@@ -1015,7 +1019,8 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine, ...@@ -1015,7 +1019,8 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
static void i915_gem_record_rings(struct drm_device *dev, static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error) struct drm_i915_error_state *error)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_request *request; struct drm_i915_gem_request *request;
int i, count; int i, count;
...@@ -1038,7 +1043,7 @@ static void i915_gem_record_rings(struct drm_device *dev, ...@@ -1038,7 +1043,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
vm = request->ctx && request->ctx->ppgtt ? vm = request->ctx && request->ctx->ppgtt ?
&request->ctx->ppgtt->base : &request->ctx->ppgtt->base :
&dev_priv->ggtt.base; &ggtt->base;
/* We need to copy these to an anonymous buffer /* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten * as the simplest method to avoid being overwritten
...@@ -1049,7 +1054,7 @@ static void i915_gem_record_rings(struct drm_device *dev, ...@@ -1049,7 +1054,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
request->batch_obj, request->batch_obj,
vm); vm);
if (HAS_BROKEN_CS_TLB(dev_priv->dev)) if (HAS_BROKEN_CS_TLB(dev_priv))
error->ring[i].wa_batchbuffer = error->ring[i].wa_batchbuffer =
i915_error_ggtt_object_create(dev_priv, i915_error_ggtt_object_create(dev_priv,
engine->scratch.obj); engine->scratch.obj);
......
...@@ -27,9 +27,12 @@ ...@@ -27,9 +27,12 @@
/* Definitions of GuC H/W registers, bits, etc */ /* Definitions of GuC H/W registers, bits, etc */
#define GUC_STATUS _MMIO(0xc000) #define GUC_STATUS _MMIO(0xc000)
#define GS_RESET_SHIFT 0
#define GS_MIA_IN_RESET (0x01 << GS_RESET_SHIFT)
#define GS_BOOTROM_SHIFT 1 #define GS_BOOTROM_SHIFT 1
#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT) #define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT)
#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT) #define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT)
#define GS_BOOTROM_JUMP_PASSED (0x76 << GS_BOOTROM_SHIFT)
#define GS_UKERNEL_SHIFT 8 #define GS_UKERNEL_SHIFT 8
#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT) #define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT) #define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT)
...@@ -37,7 +40,13 @@ ...@@ -37,7 +40,13 @@
#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT) #define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT)
#define GS_MIA_SHIFT 16 #define GS_MIA_SHIFT 16
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) #define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT) #define GS_MIA_CORE_STATE (0x01 << GS_MIA_SHIFT)
#define GS_MIA_HALT_REQUESTED (0x02 << GS_MIA_SHIFT)
#define GS_MIA_ISR_ENTRY (0x04 << GS_MIA_SHIFT)
#define GS_AUTH_STATUS_SHIFT 30
#define GS_AUTH_STATUS_MASK (0x03 << GS_AUTH_STATUS_SHIFT)
#define GS_AUTH_STATUS_BAD (0x01 << GS_AUTH_STATUS_SHIFT)
#define GS_AUTH_STATUS_GOOD (0x02 << GS_AUTH_STATUS_SHIFT)
#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4) #define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
#define SOFT_SCRATCH_COUNT 16 #define SOFT_SCRATCH_COUNT 16
......
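The extra GUC_STATUS decodes above (boot-ROM jump, MIA halt/ISR states, RSA authentication status) give the firmware loader more visibility into how far the GuC got. A hedged sketch of a readiness check built on them; guc_ucode_response is an assumed name, not taken from this diff:

static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
				      u32 *status)
{
	u32 val = I915_READ(GUC_STATUS);
	u32 uk_val = val & GS_UKERNEL_MASK;

	*status = val;
	/* Done once the microkernel reports ready, or once the MIA
	 * core is up and the LAPIC handshake has completed. */
	return (uk_val == GS_UKERNEL_READY ||
		((val & GS_MIA_CORE_STATE) &&
		 uk_val == GS_UKERNEL_LAPIC_DONE));
}

On top of that, a loader can fail fast when (status & GS_AUTH_STATUS_MASK) == GS_AUTH_STATUS_BAD, i.e. when the RSA check rejected the firmware blob.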
...@@ -1000,6 +1000,7 @@ static void notify_ring(struct intel_engine_cs *engine) ...@@ -1000,6 +1000,7 @@ static void notify_ring(struct intel_engine_cs *engine)
return; return;
trace_i915_gem_request_notify(engine); trace_i915_gem_request_notify(engine);
engine->user_interrupts++;
wake_up_all(&engine->irq_queue); wake_up_all(&engine->irq_queue);
} }
...@@ -1218,7 +1219,7 @@ static void ivybridge_parity_work(struct work_struct *work) ...@@ -1218,7 +1219,7 @@ static void ivybridge_parity_work(struct work_struct *work)
i915_reg_t reg; i915_reg_t reg;
slice--; slice--;
if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
break; break;
dev_priv->l3_parity.which_slice &= ~(1<<slice); dev_priv->l3_parity.which_slice &= ~(1<<slice);
...@@ -1257,7 +1258,7 @@ static void ivybridge_parity_work(struct work_struct *work) ...@@ -1257,7 +1258,7 @@ static void ivybridge_parity_work(struct work_struct *work)
out: out:
WARN_ON(dev_priv->l3_parity.which_slice); WARN_ON(dev_priv->l3_parity.which_slice);
spin_lock_irq(&dev_priv->irq_lock); spin_lock_irq(&dev_priv->irq_lock);
gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
spin_unlock_irq(&dev_priv->irq_lock); spin_unlock_irq(&dev_priv->irq_lock);
mutex_unlock(&dev_priv->dev->struct_mutex); mutex_unlock(&dev_priv->dev->struct_mutex);
...@@ -1323,7 +1324,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) ...@@ -1323,7 +1324,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
notify_ring(engine); notify_ring(engine);
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
intel_lrc_irq_handler(engine); tasklet_schedule(&engine->irq_tasklet);
} }
static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
...@@ -1626,7 +1627,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) ...@@ -1626,7 +1627,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (INTEL_INFO(dev_priv)->gen >= 8) if (INTEL_INFO(dev_priv)->gen >= 8)
return; return;
if (HAS_VEBOX(dev_priv->dev)) { if (HAS_VEBOX(dev_priv)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT) if (pm_iir & PM_VEBOX_USER_INTERRUPT)
notify_ring(&dev_priv->engine[VECS]); notify_ring(&dev_priv->engine[VECS]);
...@@ -1828,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) ...@@ -1828,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */ /* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv); disable_rpm_wakeref_asserts(dev_priv);
for (;;) { do {
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR); iir = I915_READ(VLV_IIR);
...@@ -1856,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) ...@@ -1856,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ); POSTING_READ(GEN8_MASTER_IRQ);
} } while (0);
enable_rpm_wakeref_asserts(dev_priv); enable_rpm_wakeref_asserts(dev_priv);
...@@ -2805,8 +2806,8 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) ...@@ -2805,8 +2806,8 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
static bool static bool
ring_idle(struct intel_engine_cs *engine, u32 seqno) ring_idle(struct intel_engine_cs *engine, u32 seqno)
{ {
return (list_empty(&engine->request_list) || return i915_seqno_passed(seqno,
i915_seqno_passed(seqno, engine->last_submitted_seqno)); READ_ONCE(engine->last_submitted_seqno));
} }
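ring_idle() now keys purely off the submitted/completed seqno pair; READ_ONCE() forces a single untorn load of last_submitted_seqno, which another CPU may update concurrently. The comparison itself is the driver's usual wraparound-safe signed arithmetic; for reference, the helper is of the form:

static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	/* True when seq1 is at or after seq2, modulo u32 wraparound. */
	return (s32)(seq1 - seq2) >= 0;
}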
static bool static bool
...@@ -2828,7 +2829,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, ...@@ -2828,7 +2829,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
struct drm_i915_private *dev_priv = engine->dev->dev_private; struct drm_i915_private *dev_priv = engine->dev->dev_private;
struct intel_engine_cs *signaller; struct intel_engine_cs *signaller;
if (INTEL_INFO(dev_priv->dev)->gen >= 8) { if (INTEL_INFO(dev_priv)->gen >= 8) {
for_each_engine(signaller, dev_priv) { for_each_engine(signaller, dev_priv) {
if (engine == signaller) if (engine == signaller)
continue; continue;
...@@ -2941,7 +2942,7 @@ static int semaphore_passed(struct intel_engine_cs *engine) ...@@ -2941,7 +2942,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES) if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
return -1; return -1;
if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
return 1; return 1;
/* cursory check for an unkickable deadlock */ /* cursory check for an unkickable deadlock */
...@@ -3054,6 +3055,24 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd) ...@@ -3054,6 +3055,24 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
return HANGCHECK_HUNG; return HANGCHECK_HUNG;
} }
static unsigned kick_waiters(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = to_i915(engine->dev);
unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
if (engine->hangcheck.user_interrupts == user_interrupts &&
!test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
engine->name);
else
DRM_INFO("Fake missed irq on %s\n",
engine->name);
wake_up_all(&engine->irq_queue);
}
return user_interrupts;
}
/* /*
* This is called when the chip hasn't reported back with completed * This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. We keep track per ring seqno progress and * batchbuffers in a long time. We keep track per ring seqno progress and
...@@ -3096,29 +3115,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work) ...@@ -3096,29 +3115,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
for_each_engine_id(engine, dev_priv, id) { for_each_engine_id(engine, dev_priv, id) {
u64 acthd; u64 acthd;
u32 seqno; u32 seqno;
unsigned user_interrupts;
bool busy = true; bool busy = true;
semaphore_clear_deadlocks(dev_priv); semaphore_clear_deadlocks(dev_priv);
seqno = engine->get_seqno(engine, false); /* We don't strictly need an irq-barrier here, as we are not
* serving an interrupt request, but be paranoid in case the
* barrier has side-effects (such as preventing a broken
* cacheline snoop) and so be sure that we can see the seqno
* advance. If the seqno should stick, due to a stale
* cacheline, we would erroneously declare the GPU hung.
*/
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
acthd = intel_ring_get_active_head(engine); acthd = intel_ring_get_active_head(engine);
seqno = engine->get_seqno(engine);
/* Reset stuck interrupts between batch advances */
user_interrupts = 0;
if (engine->hangcheck.seqno == seqno) { if (engine->hangcheck.seqno == seqno) {
if (ring_idle(engine, seqno)) { if (ring_idle(engine, seqno)) {
engine->hangcheck.action = HANGCHECK_IDLE; engine->hangcheck.action = HANGCHECK_IDLE;
if (waitqueue_active(&engine->irq_queue)) { if (waitqueue_active(&engine->irq_queue)) {
/* Issue a wake-up to catch stuck h/w. */
if (!test_and_set_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings)) {
if (!(dev_priv->gpu_error.test_irq_rings & intel_engine_flag(engine)))
DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
engine->name);
else
DRM_INFO("Fake missed irq on %s\n",
engine->name);
wake_up_all(&engine->irq_queue);
}
/* Safeguard against driver failure */ /* Safeguard against driver failure */
user_interrupts = kick_waiters(engine);
engine->hangcheck.score += BUSY; engine->hangcheck.score += BUSY;
} else } else
busy = false; busy = false;
...@@ -3169,7 +3192,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) ...@@ -3169,7 +3192,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
engine->hangcheck.score = 0; engine->hangcheck.score = 0;
/* Clear head and subunit states on seqno movement */ /* Clear head and subunit states on seqno movement */
engine->hangcheck.acthd = 0; acthd = 0;
memset(engine->hangcheck.instdone, 0, memset(engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone)); sizeof(engine->hangcheck.instdone));
...@@ -3177,6 +3200,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) ...@@ -3177,6 +3200,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
engine->hangcheck.seqno = seqno; engine->hangcheck.seqno = seqno;
engine->hangcheck.acthd = acthd; engine->hangcheck.acthd = acthd;
engine->hangcheck.user_interrupts = user_interrupts;
busy_count += busy; busy_count += busy;
} }
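The irq_seqno_barrier hook invoked above replaces the old get_seqno(engine, lazy_coherency) flag (note the call sites switching to engine->get_seqno(engine) throughout this diff): rather than every caller choosing a coherency mode, engines that need it install a barrier that makes the seqno write visible. As a sketch of what that barrier amounts to on gen6-style hardware, under the assumption that reading any CS register flushes the status-page write:

static void gen6_seqno_barrier(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->dev->dev_private;

	/* Reading a CS register (ACTHD here) before the status page
	 * orders the seqno write ahead of the interrupt we are about
	 * to service, without any forcewake dance. */
	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
}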
...@@ -3500,6 +3524,26 @@ static void bxt_hpd_irq_setup(struct drm_device *dev) ...@@ -3500,6 +3524,26 @@ static void bxt_hpd_irq_setup(struct drm_device *dev)
hotplug = I915_READ(PCH_PORT_HOTPLUG); hotplug = I915_READ(PCH_PORT_HOTPLUG);
hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
PORTA_HOTPLUG_ENABLE; PORTA_HOTPLUG_ENABLE;
DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
hotplug, enabled_irqs);
hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
/*
* On BXT the invert bit has to be set according to the AOB design
* of the HPD detection logic; update it from the VBT fields.
*/
if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
hotplug |= BXT_DDIA_HPD_INVERT;
if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
hotplug |= BXT_DDIB_HPD_INVERT;
if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
hotplug |= BXT_DDIC_HPD_INVERT;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug); I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
} }
......
...@@ -165,6 +165,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) ...@@ -165,6 +165,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN6_GRDOM_MEDIA (1 << 2) #define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3) #define GEN6_GRDOM_BLT (1 << 3)
#define GEN6_GRDOM_VECS (1 << 4) #define GEN6_GRDOM_VECS (1 << 4)
#define GEN9_GRDOM_GUC (1 << 5)
#define GEN8_GRDOM_MEDIA2 (1 << 7) #define GEN8_GRDOM_MEDIA2 (1 << 7)
#define RING_PP_DIR_BASE(ring) _MMIO((ring)->mmio_base+0x228) #define RING_PP_DIR_BASE(ring) _MMIO((ring)->mmio_base+0x228)
...@@ -627,6 +628,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) ...@@ -627,6 +628,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IOSF_PORT_GPIO_SC 0x48 #define IOSF_PORT_GPIO_SC 0x48
#define IOSF_PORT_GPIO_SUS 0xa8 #define IOSF_PORT_GPIO_SUS 0xa8
#define IOSF_PORT_CCU 0xa9 #define IOSF_PORT_CCU 0xa9
#define CHV_IOSF_PORT_GPIO_N 0x13
#define CHV_IOSF_PORT_GPIO_SE 0x48
#define CHV_IOSF_PORT_GPIO_E 0xa8
#define CHV_IOSF_PORT_GPIO_SW 0xb2
#define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104) #define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108) #define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108)
...@@ -791,6 +796,7 @@ enum skl_disp_power_wells { ...@@ -791,6 +796,7 @@ enum skl_disp_power_wells {
#define DSI_PLL_M1_DIV_SHIFT 0 #define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0) #define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_CZ_CLOCK_CONTROL 0x62 #define CCK_CZ_CLOCK_CONTROL 0x62
#define CCK_GPLL_CLOCK_CONTROL 0x67
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b #define CCK_DISPLAY_CLOCK_CONTROL 0x6b
#define CCK_DISPLAY_REF_CLOCK_CONTROL 0x6c #define CCK_DISPLAY_REF_CLOCK_CONTROL 0x6c
#define CCK_TRUNK_FORCE_ON (1 << 17) #define CCK_TRUNK_FORCE_ON (1 << 17)
...@@ -1324,6 +1330,7 @@ enum skl_disp_power_wells { ...@@ -1324,6 +1330,7 @@ enum skl_disp_power_wells {
#define _PORT_CL1CM_DW0_A 0x162000 #define _PORT_CL1CM_DW0_A 0x162000
#define _PORT_CL1CM_DW0_BC 0x6C000 #define _PORT_CL1CM_DW0_BC 0x6C000
#define PHY_POWER_GOOD (1 << 16) #define PHY_POWER_GOOD (1 << 16)
#define PHY_RESERVED (1 << 7)
#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \ #define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
_PORT_CL1CM_DW0_A) _PORT_CL1CM_DW0_A)
...@@ -1783,6 +1790,18 @@ enum skl_disp_power_wells { ...@@ -1783,6 +1790,18 @@ enum skl_disp_power_wells {
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2)) #define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2)) #define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
/* WaClearTdlStateAckDirtyBits */
#define GEN8_STATE_ACK _MMIO(0x20F0)
#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8)
#define GEN9_STATE_ACK_SLICE2 _MMIO(0x2100)
#define GEN9_STATE_ACK_TDL0 (1 << 12)
#define GEN9_STATE_ACK_TDL1 (1 << 13)
#define GEN9_STATE_ACK_TDL2 (1 << 14)
#define GEN9_STATE_ACK_TDL3 (1 << 15)
#define GEN9_SUBSLICE_TDL_ACK_BITS \
(GEN9_STATE_ACK_TDL3 | GEN9_STATE_ACK_TDL2 | \
GEN9_STATE_ACK_TDL1 | GEN9_STATE_ACK_TDL0)
#define GFX_MODE _MMIO(0x2520) #define GFX_MODE _MMIO(0x2520)
#define GFX_MODE_GEN7 _MMIO(0x229c) #define GFX_MODE_GEN7 _MMIO(0x229c)
#define RING_MODE_GEN7(ring) _MMIO((ring)->mmio_base+0x29c) #define RING_MODE_GEN7(ring) _MMIO((ring)->mmio_base+0x29c)
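The GEN8_STATE_ACK/GEN9_STATE_ACK_SLICE* definitions above exist for WaClearTdlStateAckDirtyBits. Presumably the workaround is applied by writing the TDL ack bits back per slice as masked-bit enables; a sketch, with the affected-platform check left out as an assumption:

/* WaClearTdlStateAckDirtyBits (sketch) */
I915_WRITE(GEN8_STATE_ACK,
	   _MASKED_BIT_ENABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
I915_WRITE(GEN9_STATE_ACK_SLICE1,
	   _MASKED_BIT_ENABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
I915_WRITE(GEN9_STATE_ACK_SLICE2,
	   _MASKED_BIT_ENABLE(GEN9_SUBSLICE_TDL_ACK_BITS));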
...@@ -4785,6 +4804,10 @@ enum skl_disp_power_wells { ...@@ -4785,6 +4804,10 @@ enum skl_disp_power_wells {
#define CBR_PND_DEADLINE_DISABLE (1<<31) #define CBR_PND_DEADLINE_DISABLE (1<<31)
#define CBR_PWM_CLOCK_MUX_SELECT (1<<30) #define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
#define CBR_DPLLBMD_PIPE_C (1<<29)
#define CBR_DPLLBMD_PIPE_B (1<<18)
/* FIFO watermark sizes etc */ /* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64 #define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64 #define I915_FIFO_LINE_SIZE 64
...@@ -6185,6 +6208,7 @@ enum skl_disp_power_wells { ...@@ -6185,6 +6208,7 @@ enum skl_disp_power_wells {
/* digital port hotplug */ /* digital port hotplug */
#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */ #define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */ #define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
#define BXT_DDIA_HPD_INVERT (1 << 27)
#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */ #define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */ #define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */ #define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */
...@@ -6200,6 +6224,7 @@ enum skl_disp_power_wells { ...@@ -6200,6 +6224,7 @@ enum skl_disp_power_wells {
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) #define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (2 << 16) #define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
#define PORTC_HOTPLUG_ENABLE (1 << 12) #define PORTC_HOTPLUG_ENABLE (1 << 12)
#define BXT_DDIC_HPD_INVERT (1 << 11)
#define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */ #define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */ #define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */ #define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */
...@@ -6210,6 +6235,7 @@ enum skl_disp_power_wells { ...@@ -6210,6 +6235,7 @@ enum skl_disp_power_wells {
#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) #define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
#define PORTC_HOTPLUG_LONG_DETECT (2 << 8) #define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
#define PORTB_HOTPLUG_ENABLE (1 << 4) #define PORTB_HOTPLUG_ENABLE (1 << 4)
#define BXT_DDIB_HPD_INVERT (1 << 3)
#define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */ #define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */ #define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */ #define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */
...@@ -6219,6 +6245,9 @@ enum skl_disp_power_wells { ...@@ -6219,6 +6245,9 @@ enum skl_disp_power_wells {
#define PORTB_HOTPLUG_NO_DETECT (0 << 0) #define PORTB_HOTPLUG_NO_DETECT (0 << 0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (2 << 0) #define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
#define BXT_DDI_HPD_INVERT_MASK (BXT_DDIA_HPD_INVERT | \
BXT_DDIB_HPD_INVERT | \
BXT_DDIC_HPD_INVERT)
#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */ #define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */
#define PORTE_HOTPLUG_ENABLE (1 << 4) #define PORTE_HOTPLUG_ENABLE (1 << 4)
......
...@@ -562,7 +562,7 @@ TRACE_EVENT(i915_gem_request_notify, ...@@ -562,7 +562,7 @@ TRACE_EVENT(i915_gem_request_notify,
TP_fast_assign( TP_fast_assign(
__entry->dev = engine->dev->primary->index; __entry->dev = engine->dev->primary->index;
__entry->ring = engine->id; __entry->ring = engine->id;
__entry->seqno = engine->get_seqno(engine, false); __entry->seqno = engine->get_seqno(engine);
), ),
TP_printk("dev=%u, ring=%u, seqno=%u", TP_printk("dev=%u, ring=%u, seqno=%u",
......
...@@ -181,8 +181,8 @@ static int vgt_balloon_space(struct drm_mm *mm, ...@@ -181,8 +181,8 @@ static int vgt_balloon_space(struct drm_mm *mm,
int intel_vgt_balloon(struct drm_device *dev) int intel_vgt_balloon(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base; struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total; unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;
unsigned long mappable_base, mappable_size, mappable_end; unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end; unsigned long unmappable_base, unmappable_size, unmappable_end;
...@@ -202,19 +202,19 @@ int intel_vgt_balloon(struct drm_device *dev) ...@@ -202,19 +202,19 @@ int intel_vgt_balloon(struct drm_device *dev)
DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n", DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
unmappable_base, unmappable_size / 1024); unmappable_base, unmappable_size / 1024);
if (mappable_base < ggtt_vm->start || if (mappable_base < ggtt->base.start ||
mappable_end > dev_priv->ggtt.mappable_end || mappable_end > ggtt->mappable_end ||
unmappable_base < dev_priv->ggtt.mappable_end || unmappable_base < ggtt->mappable_end ||
unmappable_end > ggtt_vm_end) { unmappable_end > ggtt_end) {
DRM_ERROR("Invalid ballooning configuration!\n"); DRM_ERROR("Invalid ballooning configuration!\n");
return -EINVAL; return -EINVAL;
} }
/* Unmappable graphic memory ballooning */ /* Unmappable graphic memory ballooning */
if (unmappable_base > dev_priv->ggtt.mappable_end) { if (unmappable_base > ggtt->mappable_end) {
ret = vgt_balloon_space(&ggtt_vm->mm, ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[2], &bl_info.space[2],
dev_priv->ggtt.mappable_end, ggtt->mappable_end,
unmappable_base); unmappable_base);
if (ret) if (ret)
...@@ -225,30 +225,30 @@ int intel_vgt_balloon(struct drm_device *dev) ...@@ -225,30 +225,30 @@ int intel_vgt_balloon(struct drm_device *dev)
* No need to partition out the last physical page, * No need to partition out the last physical page,
* because it is reserved to the guard page. * because it is reserved to the guard page.
*/ */
if (unmappable_end < ggtt_vm_end - PAGE_SIZE) { if (unmappable_end < ggtt_end - PAGE_SIZE) {
ret = vgt_balloon_space(&ggtt_vm->mm, ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[3], &bl_info.space[3],
unmappable_end, unmappable_end,
ggtt_vm_end - PAGE_SIZE); ggtt_end - PAGE_SIZE);
if (ret) if (ret)
goto err; goto err;
} }
/* Mappable graphic memory ballooning */ /* Mappable graphic memory ballooning */
if (mappable_base > ggtt_vm->start) { if (mappable_base > ggtt->base.start) {
ret = vgt_balloon_space(&ggtt_vm->mm, ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[0], &bl_info.space[0],
ggtt_vm->start, mappable_base); ggtt->base.start, mappable_base);
if (ret) if (ret)
goto err; goto err;
} }
if (mappable_end < dev_priv->ggtt.mappable_end) { if (mappable_end < ggtt->mappable_end) {
ret = vgt_balloon_space(&ggtt_vm->mm, ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[1], &bl_info.space[1],
mappable_end, mappable_end,
dev_priv->ggtt.mappable_end); ggtt->mappable_end);
if (ret) if (ret)
goto err; goto err;
......
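Putting the bounds checks above together, the guest's two chunks are carved out of the host GGTT and everything around them is ballooned; a sketch of the resulting layout (the final guard page is never ballooned):

/*
 * ggtt->base.start              ggtt->mappable_end               ggtt_end
 *     |<---------- mappable ---------->|<-------- unmappable -------->|
 *     +--------+----------+---------+--------+------------+--------+--+
 *     |space[0]|  guest   |space[1] |space[2]|   guest    |space[3]|GP|
 *     |balloon | mappable |balloon  |balloon | unmappable |balloon |  |
 *     +--------+----------+---------+--------+------------+--------+--+
 *
 * GP = final guard page, reserved and never ballooned.
 */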
...@@ -373,7 +373,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder) ...@@ -373,7 +373,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
if (WARN_ON(port == PORT_A)) if (WARN_ON(port == PORT_A))
return; return;
if (HAS_PCH_IBX(dev_priv->dev)) { if (HAS_PCH_IBX(dev_priv)) {
aud_config = IBX_AUD_CFG(pipe); aud_config = IBX_AUD_CFG(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2; aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
......
...@@ -1123,7 +1123,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, ...@@ -1123,7 +1123,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
} }
/* Parse the I_boost config for SKL and above */ /* Parse the I_boost config for SKL and above */
if (bdb->version >= 196 && (child->common.flags_1 & IBOOST_ENABLE)) { if (bdb->version >= 196 && child->common.iboost) {
info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF); info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n", DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
port_name(port), info->dp_boost_level); port_name(port), info->dp_boost_level);
...@@ -1241,6 +1241,19 @@ parse_device_mapping(struct drm_i915_private *dev_priv, ...@@ -1241,6 +1241,19 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
*/ */
memcpy(child_dev_ptr, p_child, memcpy(child_dev_ptr, p_child,
min_t(size_t, p_defs->child_dev_size, sizeof(*p_child))); min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
/*
* copied full block, now init values when they are not
* available in current version
*/
if (bdb->version < 196) {
/* Set default values for bits added from v196 */
child_dev_ptr->common.iboost = 0;
child_dev_ptr->common.hpd_invert = 0;
}
if (bdb->version < 192)
child_dev_ptr->common.lspcon = 0;
} }
return; return;
} }
...@@ -1585,3 +1598,47 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, ...@@ -1585,3 +1598,47 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
return false; return false;
} }
/**
* intel_bios_is_port_hpd_inverted - is HPD inverted for %port
* @dev_priv: i915 device instance
* @port: port to check
*
* Return true if HPD should be inverted for %port.
*/
bool
intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port)
{
int i;
if (WARN_ON_ONCE(!IS_BROXTON(dev_priv)))
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
if (!dev_priv->vbt.child_dev[i].common.hpd_invert)
continue;
switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
case DVO_PORT_DPA:
case DVO_PORT_HDMIA:
if (port == PORT_A)
return true;
break;
case DVO_PORT_DPB:
case DVO_PORT_HDMIB:
if (port == PORT_B)
return true;
break;
case DVO_PORT_DPC:
case DVO_PORT_HDMIC:
if (port == PORT_C)
return true;
break;
default:
break;
}
}
return false;
}
...@@ -92,10 +92,10 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input) ...@@ -92,10 +92,10 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
} }
/* Set up the pipe CSC unit. */ /* Set up the pipe CSC unit. */
static void i9xx_load_csc_matrix(struct drm_crtc *crtc) static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
{ {
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc->dev;
struct drm_crtc_state *crtc_state = crtc->state;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int i, pipe = intel_crtc->pipe; int i, pipe = intel_crtc->pipe;
...@@ -203,10 +203,10 @@ static void i9xx_load_csc_matrix(struct drm_crtc *crtc) ...@@ -203,10 +203,10 @@ static void i9xx_load_csc_matrix(struct drm_crtc *crtc)
/* /*
* Set up the pipe CSC unit on CherryView. * Set up the pipe CSC unit on CherryView.
*/ */
static void cherryview_load_csc_matrix(struct drm_crtc *crtc) static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
{ {
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc->dev;
struct drm_crtc_state *state = crtc->state;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = to_intel_crtc(crtc)->pipe; int pipe = to_intel_crtc(crtc)->pipe;
uint32_t mode; uint32_t mode;
...@@ -252,13 +252,13 @@ static void cherryview_load_csc_matrix(struct drm_crtc *crtc) ...@@ -252,13 +252,13 @@ static void cherryview_load_csc_matrix(struct drm_crtc *crtc)
I915_WRITE(CGM_PIPE_MODE(pipe), mode); I915_WRITE(CGM_PIPE_MODE(pipe), mode);
} }
void intel_color_set_csc(struct drm_crtc *crtc) void intel_color_set_csc(struct drm_crtc_state *crtc_state)
{ {
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc_state->crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->display.load_csc_matrix) if (dev_priv->display.load_csc_matrix)
dev_priv->display.load_csc_matrix(crtc); dev_priv->display.load_csc_matrix(crtc_state);
} }
/* Loads the legacy palette/gamma unit for the CRTC. */ /* Loads the legacy palette/gamma unit for the CRTC. */
...@@ -303,19 +303,20 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc, ...@@ -303,19 +303,20 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
} }
} }
static void i9xx_load_luts(struct drm_crtc *crtc) static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
{ {
i9xx_load_luts_internal(crtc, crtc->state->gamma_lut); i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut);
} }
/* Loads the legacy palette/gamma unit for the CRTC on Haswell. */ /* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
static void haswell_load_luts(struct drm_crtc *crtc) static void haswell_load_luts(struct drm_crtc_state *crtc_state)
{ {
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *intel_crtc_state = struct intel_crtc_state *intel_crtc_state =
to_intel_crtc_state(crtc->state); to_intel_crtc_state(crtc_state);
bool reenable_ips = false; bool reenable_ips = false;
/* /*
...@@ -331,24 +332,24 @@ static void haswell_load_luts(struct drm_crtc *crtc) ...@@ -331,24 +332,24 @@ static void haswell_load_luts(struct drm_crtc *crtc)
intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT; intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
i9xx_load_luts(crtc); i9xx_load_luts(crtc_state);
if (reenable_ips) if (reenable_ips)
hsw_enable_ips(intel_crtc); hsw_enable_ips(intel_crtc);
} }
/* Loads the palette/gamma unit for the CRTC on Broadwell+. */ /* Loads the palette/gamma unit for the CRTC on Broadwell+. */
static void broadwell_load_luts(struct drm_crtc *crtc) static void broadwell_load_luts(struct drm_crtc_state *state)
{ {
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc->dev;
struct drm_crtc_state *state = crtc->state;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc_state *intel_state = to_intel_crtc_state(state); struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
enum pipe pipe = to_intel_crtc(crtc)->pipe; enum pipe pipe = to_intel_crtc(crtc)->pipe;
uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size; uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
if (crtc_state_is_legacy(state)) { if (crtc_state_is_legacy(state)) {
haswell_load_luts(crtc); haswell_load_luts(state);
return; return;
} }
...@@ -421,11 +422,11 @@ static void broadwell_load_luts(struct drm_crtc *crtc) ...@@ -421,11 +422,11 @@ static void broadwell_load_luts(struct drm_crtc *crtc)
} }
/* Loads the palette/gamma unit for the CRTC on CherryView. */ /* Loads the palette/gamma unit for the CRTC on CherryView. */
static void cherryview_load_luts(struct drm_crtc *crtc) static void cherryview_load_luts(struct drm_crtc_state *state)
{ {
struct drm_crtc *crtc = state->crtc;
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc_state *state = crtc->state;
enum pipe pipe = to_intel_crtc(crtc)->pipe; enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_color_lut *lut; struct drm_color_lut *lut;
uint32_t i, lut_size; uint32_t i, lut_size;
...@@ -481,16 +482,12 @@ static void cherryview_load_luts(struct drm_crtc *crtc) ...@@ -481,16 +482,12 @@ static void cherryview_load_luts(struct drm_crtc *crtc)
i9xx_load_luts_internal(crtc, NULL); i9xx_load_luts_internal(crtc, NULL);
} }
void intel_color_load_luts(struct drm_crtc *crtc) void intel_color_load_luts(struct drm_crtc_state *crtc_state)
{ {
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc_state->crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
/* The clocks have to be on to load the palette. */ dev_priv->display.load_luts(crtc_state);
if (!crtc->state->active)
return;
dev_priv->display.load_luts(crtc);
} }
int intel_color_check(struct drm_crtc *crtc, int intel_color_check(struct drm_crtc *crtc,
......
...@@ -315,6 +315,9 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder, ...@@ -315,6 +315,9 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
*dig_port = enc_to_mst(encoder)->primary; *dig_port = enc_to_mst(encoder)->primary;
*port = (*dig_port)->port; *port = (*dig_port)->port;
break; break;
default:
WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
/* fallthrough and treat as unknown */
case INTEL_OUTPUT_DISPLAYPORT: case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_EDP: case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_HDMI:
...@@ -326,9 +329,6 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder, ...@@ -326,9 +329,6 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
*dig_port = NULL; *dig_port = NULL;
*port = PORT_E; *port = PORT_E;
break; break;
default:
WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
break;
} }
} }
...@@ -629,6 +629,10 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) ...@@ -629,6 +629,10 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
break; break;
} }
rx_ctl_val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
POSTING_READ(FDI_RX_CTL(PIPE_A));
temp = I915_READ(DDI_BUF_CTL(PORT_E)); temp = I915_READ(DDI_BUF_CTL(PORT_E));
temp &= ~DDI_BUF_CTL_ENABLE; temp &= ~DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(PORT_E), temp); I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
...@@ -643,10 +647,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) ...@@ -643,10 +647,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
intel_wait_ddi_buf_idle(dev_priv, PORT_E); intel_wait_ddi_buf_idle(dev_priv, PORT_E);
rx_ctl_val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
POSTING_READ(FDI_RX_CTL(PIPE_A));
/* Reset FDI_RX_MISC pwrdn lanes */ /* Reset FDI_RX_MISC pwrdn lanes */
temp = I915_READ(FDI_RX_MISC(PIPE_A)); temp = I915_READ(FDI_RX_MISC(PIPE_A));
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
...@@ -1726,18 +1726,31 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv, ...@@ -1726,18 +1726,31 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
enum dpio_phy phy) enum dpio_phy phy)
{ {
enum port port; enum port port;
uint32_t val; u32 ports, val;
val = I915_READ(BXT_P_CR_GT_DISP_PWRON); val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
val |= GT_DISPLAY_POWER_ON(phy); val |= GT_DISPLAY_POWER_ON(phy);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
/* Considering 10ms timeout until BSpec is updated */ /*
if (wait_for(I915_READ(BXT_PORT_CL1CM_DW0(phy)) & PHY_POWER_GOOD, 10)) * The PHY registers start out inaccessible and respond to reads with
* all 1s. Eventually they become accessible as they power up, then
* the reserved bit will give the default 0. Poll on the reserved bit
* becoming 0 to find when the PHY is accessible.
* HW team confirmed that the time to reach phypowergood status is
* anywhere between 50 us and 100us.
*/
if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
(PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
DRM_ERROR("timeout during PHY%d power on\n", phy); DRM_ERROR("timeout during PHY%d power on\n", phy);
}
if (phy == DPIO_PHY0)
ports = BIT(PORT_B) | BIT(PORT_C);
else
ports = BIT(PORT_A);
for (port = (phy == DPIO_PHY0 ? PORT_B : PORT_A); for_each_port_masked(port, ports) {
port <= (phy == DPIO_PHY0 ? PORT_C : PORT_A); port++) {
int lane; int lane;
for (lane = 0; lane < 4; lane++) { for (lane = 0; lane < 4; lane++) {
...@@ -1898,12 +1911,18 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc) ...@@ -1898,12 +1911,18 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
uint32_t val; uint32_t val;
intel_ddi_post_disable(intel_encoder); /*
* Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
* and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
* step 13 is the correct place for it. Step 18 is where it was
* originally before the BUN.
*/
val = I915_READ(FDI_RX_CTL(PIPE_A)); val = I915_READ(FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_ENABLE; val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), val); I915_WRITE(FDI_RX_CTL(PIPE_A), val);
intel_ddi_post_disable(intel_encoder);
val = I915_READ(FDI_RX_MISC(PIPE_A)); val = I915_READ(FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
......
...@@ -129,6 +129,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); ...@@ -129,6 +129,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp); static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev, static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe); enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
static unsigned int intel_dp_unused_lane_mask(int lane_count) static unsigned int intel_dp_unused_lane_mask(int lane_count)
{ {
...@@ -3787,6 +3788,27 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) ...@@ -3787,6 +3788,27 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] == 0) if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */ return false; /* DPCD not present */
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
&intel_dp->sink_count, 1) < 0)
return false;
/*
* Sink count can change between short pulse hpd hence
* a member variable in intel_dp will track any changes
* between short pulse interrupts.
*/
intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
/*
* SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
* a dongle is present but no display. Unless we require to know
* if a dongle is present or not, we don't need to update
* downstream port information. So, an early return here saves
* time from performing other operations which are not required.
*/
if (!intel_dp->sink_count)
return false;
/* Check if the panel supports PSR */ /* Check if the panel supports PSR */
memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
if (is_edp(intel_dp)) { if (is_edp(intel_dp)) {
...@@ -4214,6 +4236,36 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) ...@@ -4214,6 +4236,36 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
return -EINVAL; return -EINVAL;
} }
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
u8 link_status[DP_LINK_STATUS_SIZE];
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("Failed to get link status\n");
return;
}
if (!intel_encoder->base.crtc)
return;
if (!to_intel_crtc(intel_encoder->base.crtc)->active)
return;
/* if link training is requested we should perform it always */
if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
}
/* /*
* According to DP spec * According to DP spec
* 5.1.2: * 5.1.2:
...@@ -4221,16 +4273,19 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) ...@@ -4221,16 +4273,19 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
* 2. Configure link according to Receiver Capabilities * 2. Configure link according to Receiver Capabilities
* 3. Use Link Training from 2.5.3.3 and 3.5.1.3 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
* 4. Check link status on receipt of hot-plug interrupt * 4. Check link status on receipt of hot-plug interrupt
*
* intel_dp_short_pulse - handles short pulse interrupts
* when full detection is not required.
* Returns %true if short pulse is handled and full detection
* is NOT required and %false otherwise.
*/ */
static void static bool
intel_dp_check_link_status(struct intel_dp *intel_dp) intel_dp_short_pulse(struct intel_dp *intel_dp)
{ {
struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector; u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE]; u8 old_sink_count = intel_dp->sink_count;
bool ret;
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
/* /*
* Clearing compliance test variables to allow capturing * Clearing compliance test variables to allow capturing
...@@ -4240,20 +4295,17 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) ...@@ -4240,20 +4295,17 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
intel_dp->compliance_test_type = 0; intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0; intel_dp->compliance_test_data = 0;
if (!intel_encoder->base.crtc) /*
return; * Now read the DPCD to see if it's actually running
* If the current value of sink count doesn't match with
if (!to_intel_crtc(intel_encoder->base.crtc)->active) * the value that was stored earlier or dpcd read failed
return; * we need to do full detection
*/
/* Try to read receiver status if the link appears to be up */ ret = intel_dp_get_dpcd(intel_dp);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
return;
}
/* Now read the DPCD to see if it's actually running */ if ((old_sink_count != intel_dp->sink_count) || !ret) {
if (!intel_dp_get_dpcd(intel_dp)) { /* No need to proceed if we are going to do full detect */
return; return false;
} }
/* Try to read the source of the interrupt */ /* Try to read the source of the interrupt */
...@@ -4270,14 +4322,11 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) ...@@ -4270,14 +4322,11 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
} }
/* if link training is requested we should perform it always */ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) || intel_dp_check_link_status(intel_dp);
(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) { drm_modeset_unlock(&dev->mode_config.connection_mutex);
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name); return true;
intel_dp_start_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
} }
/* XXX this is probably wrong for multiple downstream ports */ /* XXX this is probably wrong for multiple downstream ports */
...@@ -4297,14 +4346,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) ...@@ -4297,14 +4346,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
/* If we're HPD-aware, SINK_COUNT changes dynamically */ /* If we're HPD-aware, SINK_COUNT changes dynamically */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
uint8_t reg;
if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
&reg, 1) < 0)
return connector_status_unknown;
return DP_GET_SINK_COUNT(reg) ? connector_status_connected return intel_dp->sink_count ?
: connector_status_disconnected; connector_status_connected : connector_status_disconnected;
} }
/* If no HPD, poke DDC gently */ /* If no HPD, poke DDC gently */
...@@ -4513,6 +4557,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp) ...@@ -4513,6 +4557,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
struct intel_connector *intel_connector = intel_dp->attached_connector; struct intel_connector *intel_connector = intel_dp->attached_connector;
struct edid *edid; struct edid *edid;
intel_dp_unset_edid(intel_dp);
edid = intel_dp_get_edid(intel_dp); edid = intel_dp_get_edid(intel_dp);
intel_connector->detect_edid = edid; intel_connector->detect_edid = edid;
...@@ -4533,9 +4578,10 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) ...@@ -4533,9 +4578,10 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_dp->has_audio = false; intel_dp->has_audio = false;
} }
static enum drm_connector_status static void
intel_dp_detect(struct drm_connector *connector, bool force) intel_dp_long_pulse(struct intel_connector *intel_connector)
{ {
struct drm_connector *connector = &intel_connector->base;
struct intel_dp *intel_dp = intel_attached_dp(connector); struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base; struct intel_encoder *intel_encoder = &intel_dig_port->base;
...@@ -4545,17 +4591,6 @@ intel_dp_detect(struct drm_connector *connector, bool force) ...@@ -4545,17 +4591,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
bool ret; bool ret;
u8 sink_irq_vector; u8 sink_irq_vector;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
intel_dp_unset_edid(intel_dp);
if (intel_dp->is_mst) {
/* MST devices are disconnected from a monitor POV */
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
return connector_status_disconnected;
}
power_domain = intel_display_port_aux_power_domain(intel_encoder); power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(to_i915(dev), power_domain); intel_display_power_get(to_i915(dev), power_domain);
...@@ -4576,16 +4611,30 @@ intel_dp_detect(struct drm_connector *connector, bool force) ...@@ -4576,16 +4611,30 @@ intel_dp_detect(struct drm_connector *connector, bool force)
goto out; goto out;
} }
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
intel_dp_probe_oui(intel_dp); intel_dp_probe_oui(intel_dp);
ret = intel_dp_probe_mst(intel_dp); ret = intel_dp_probe_mst(intel_dp);
if (ret) { if (ret) {
/* if we are in MST mode then this connector /*
won't appear connected or have anything with EDID on it */ * If we are in MST mode then this connector
if (intel_encoder->type != INTEL_OUTPUT_EDP) * won't appear connected or have anything
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; * with EDID on it
*/
status = connector_status_disconnected; status = connector_status_disconnected;
goto out; goto out;
} else if (connector->status == connector_status_connected) {
/*
* If display was connected already and is still connected
* check links status, there has been known issues of
* link loss triggerring long pulse!!!!
*/
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
goto out;
} }
/* /*
...@@ -4598,9 +4647,8 @@ intel_dp_detect(struct drm_connector *connector, bool force) ...@@ -4598,9 +4647,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
intel_dp_set_edid(intel_dp); intel_dp_set_edid(intel_dp);
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
status = connector_status_connected; status = connector_status_connected;
intel_dp->detect_done = true;
/* Try to read the source of the interrupt */ /* Try to read the source of the interrupt */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
...@@ -4617,8 +4665,54 @@ intel_dp_detect(struct drm_connector *connector, bool force) ...@@ -4617,8 +4665,54 @@ intel_dp_detect(struct drm_connector *connector, bool force)
} }
out: out:
if (status != connector_status_connected) {
intel_dp_unset_edid(intel_dp);
/*
* If we were in MST mode, and device is not there,
* get out of MST mode
*/
if (intel_dp->is_mst) {
DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
}
}
intel_display_power_put(to_i915(dev), power_domain); intel_display_power_put(to_i915(dev), power_domain);
return status; return;
}
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct intel_connector *intel_connector = to_intel_connector(connector);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (intel_dp->is_mst) {
/* MST devices are disconnected from a monitor POV */
intel_dp_unset_edid(intel_dp);
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
return connector_status_disconnected;
}
/* If full detect is not performed yet, do a full detect */
if (!intel_dp->detect_done)
intel_dp_long_pulse(intel_dp->attached_connector);
intel_dp->detect_done = false;
if (intel_connector->detect_edid)
return connector_status_connected;
else
return connector_status_disconnected;
} }
static void static void
...@@ -4945,44 +5039,37 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) ...@@ -4945,44 +5039,37 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
/* indicate that we need to restart link training */ /* indicate that we need to restart link training */
intel_dp->train_set_valid = false; intel_dp->train_set_valid = false;
if (!intel_digital_port_connected(dev_priv, intel_dig_port)) intel_dp_long_pulse(intel_dp->attached_connector);
goto mst_fail; if (intel_dp->is_mst)
ret = IRQ_HANDLED;
goto put_power;
if (!intel_dp_get_dpcd(intel_dp)) {
goto mst_fail;
}
intel_dp_probe_oui(intel_dp);
if (!intel_dp_probe_mst(intel_dp)) {
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
goto mst_fail;
}
} else { } else {
if (intel_dp->is_mst) { if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL) if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
goto mst_fail; /*
* If we were in MST mode, and device is not
* there, get out of MST mode
*/
DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
goto put_power;
}
} }
if (!intel_dp->is_mst) { if (!intel_dp->is_mst) {
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); if (!intel_dp_short_pulse(intel_dp)) {
intel_dp_check_link_status(intel_dp); intel_dp_long_pulse(intel_dp->attached_connector);
drm_modeset_unlock(&dev->mode_config.connection_mutex); goto put_power;
}
} }
} }
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
goto put_power;
mst_fail:
/* if we were in MST mode, and device is not there get out of MST mode */
if (intel_dp->is_mst) {
DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
}
put_power: put_power:
intel_display_power_put(dev_priv, power_domain); intel_display_power_put(dev_priv, power_domain);
......
...@@ -89,14 +89,16 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc) ...@@ -89,14 +89,16 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
if (WARN_ON(pll == NULL)) if (WARN_ON(pll == NULL))
return; return;
mutex_lock(&dev_priv->dpll_lock);
WARN_ON(!pll->config.crtc_mask); WARN_ON(!pll->config.crtc_mask);
if (pll->active_mask == 0) { if (!pll->active_mask) {
DRM_DEBUG_DRIVER("setting up %s\n", pll->name); DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
WARN_ON(pll->on); WARN_ON(pll->on);
assert_shared_dpll_disabled(dev_priv, pll); assert_shared_dpll_disabled(dev_priv, pll);
pll->funcs.mode_set(dev_priv, pll); pll->funcs.mode_set(dev_priv, pll);
} }
mutex_unlock(&dev_priv->dpll_lock);
} }
/** /**
...@@ -113,14 +115,17 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc) ...@@ -113,14 +115,17 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll = crtc->config->shared_dpll; struct intel_shared_dpll *pll = crtc->config->shared_dpll;
unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
unsigned old_mask = pll->active_mask; unsigned old_mask;
if (WARN_ON(pll == NULL)) if (WARN_ON(pll == NULL))
return; return;
mutex_lock(&dev_priv->dpll_lock);
old_mask = pll->active_mask;
if (WARN_ON(!(pll->config.crtc_mask & crtc_mask)) || if (WARN_ON(!(pll->config.crtc_mask & crtc_mask)) ||
WARN_ON(pll->active_mask & crtc_mask)) WARN_ON(pll->active_mask & crtc_mask))
return; goto out;
pll->active_mask |= crtc_mask; pll->active_mask |= crtc_mask;
...@@ -131,13 +136,16 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc) ...@@ -131,13 +136,16 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
if (old_mask) { if (old_mask) {
WARN_ON(!pll->on); WARN_ON(!pll->on);
assert_shared_dpll_enabled(dev_priv, pll); assert_shared_dpll_enabled(dev_priv, pll);
return; goto out;
} }
WARN_ON(pll->on); WARN_ON(pll->on);
DRM_DEBUG_KMS("enabling %s\n", pll->name); DRM_DEBUG_KMS("enabling %s\n", pll->name);
pll->funcs.enable(dev_priv, pll); pll->funcs.enable(dev_priv, pll);
pll->on = true; pll->on = true;
out:
mutex_unlock(&dev_priv->dpll_lock);
} }
void intel_disable_shared_dpll(struct intel_crtc *crtc) void intel_disable_shared_dpll(struct intel_crtc *crtc)
...@@ -154,8 +162,9 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc) ...@@ -154,8 +162,9 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
if (pll == NULL) if (pll == NULL)
return; return;
mutex_lock(&dev_priv->dpll_lock);
if (WARN_ON(!(pll->active_mask & crtc_mask))) if (WARN_ON(!(pll->active_mask & crtc_mask)))
return; goto out;
DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n", DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
pll->name, pll->active_mask, pll->on, pll->name, pll->active_mask, pll->on,
...@@ -166,11 +175,14 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc) ...@@ -166,11 +175,14 @@ void intel_disable_shared_dpll(struct intel_crtc *crtc)
pll->active_mask &= ~crtc_mask; pll->active_mask &= ~crtc_mask;
if (pll->active_mask) if (pll->active_mask)
return; goto out;
DRM_DEBUG_KMS("disabling %s\n", pll->name); DRM_DEBUG_KMS("disabling %s\n", pll->name);
pll->funcs.disable(dev_priv, pll); pll->funcs.disable(dev_priv, pll);
pll->on = false; pll->on = false;
out:
mutex_unlock(&dev_priv->dpll_lock);
} }
static struct intel_shared_dpll * static struct intel_shared_dpll *
...@@ -286,7 +298,7 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) ...@@ -286,7 +298,7 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
u32 val; u32 val;
bool enabled; bool enabled;
I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev))); I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
val = I915_READ(PCH_DREF_CONTROL); val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
...@@ -1284,7 +1296,15 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, ...@@ -1284,7 +1296,15 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
temp &= ~PORT_PLL_REF_SEL; /*
* Definition of each bit polarity has been changed
* after A1 stepping
*/
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
temp &= ~PORT_PLL_REF_SEL;
else
temp |= PORT_PLL_REF_SEL;
/* Non-SSC reference */ /* Non-SSC reference */
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
...@@ -1750,6 +1770,7 @@ void intel_shared_dpll_init(struct drm_device *dev) ...@@ -1750,6 +1770,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
dev_priv->dpll_mgr = dpll_mgr; dev_priv->dpll_mgr = dpll_mgr;
dev_priv->num_shared_dpll = i; dev_priv->num_shared_dpll = i;
mutex_init(&dev_priv->dpll_lock);
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
......
...@@ -796,7 +796,9 @@ struct intel_dp { ...@@ -796,7 +796,9 @@ struct intel_dp {
uint32_t DP; uint32_t DP;
int link_rate; int link_rate;
uint8_t lane_count; uint8_t lane_count;
uint8_t sink_count;
bool has_audio; bool has_audio;
bool detect_done;
enum hdmi_force_audio force_audio; enum hdmi_force_audio force_audio;
bool limited_color_range; bool limited_color_range;
bool color_range_auto; bool color_range_auto;
...@@ -1102,6 +1104,8 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv); ...@@ -1102,6 +1104,8 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv); void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */ /* intel_display.c */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
extern const struct drm_plane_funcs intel_plane_funcs; extern const struct drm_plane_funcs intel_plane_funcs;
void intel_init_display_hooks(struct drm_i915_private *dev_priv); void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
...@@ -1669,7 +1673,7 @@ extern const struct drm_plane_helper_funcs intel_plane_helper_funcs; ...@@ -1669,7 +1673,7 @@ extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
/* intel_color.c */ /* intel_color.c */
void intel_color_init(struct drm_crtc *crtc); void intel_color_init(struct drm_crtc *crtc);
int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state); int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
void intel_color_set_csc(struct drm_crtc *crtc); void intel_color_set_csc(struct drm_crtc_state *crtc_state);
void intel_color_load_luts(struct drm_crtc *crtc); void intel_color_load_luts(struct drm_crtc_state *crtc_state);
#endif /* __INTEL_DRV_H__ */ #endif /* __INTEL_DRV_H__ */
...@@ -46,6 +46,24 @@ static const struct { ...@@ -46,6 +46,24 @@ static const struct {
}, },
}; };
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
{
/* It just so happens the VBT matches register contents. */
switch (fmt) {
case VID_MODE_FORMAT_RGB888:
return MIPI_DSI_FMT_RGB888;
case VID_MODE_FORMAT_RGB666:
return MIPI_DSI_FMT_RGB666;
case VID_MODE_FORMAT_RGB666_PACKED:
return MIPI_DSI_FMT_RGB666_PACKED;
case VID_MODE_FORMAT_RGB565:
return MIPI_DSI_FMT_RGB565;
default:
MISSING_CASE(fmt);
return MIPI_DSI_FMT_RGB666;
}
}
static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port) static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{ {
struct drm_encoder *encoder = &intel_dsi->base.base; struct drm_encoder *encoder = &intel_dsi->base.base;
...@@ -740,14 +758,74 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, ...@@ -740,14 +758,74 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
return active; return active;
} }
static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
unsigned int bpp, fmt;
enum port port;
u16 vfp, vsync, vbp;
/*
* Atleast one port is active as encoder->get_config called only if
* encoder->get_hw_state() returns true.
*/
for_each_dsi_port(port, intel_dsi->ports) {
if (I915_READ(BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
break;
}
fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
pipe_config->pipe_bpp =
mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
bpp = pipe_config->pipe_bpp;
/* In terms of pixels */
adjusted_mode->crtc_hdisplay =
I915_READ(BXT_MIPI_TRANS_HACTIVE(port));
adjusted_mode->crtc_vdisplay =
I915_READ(BXT_MIPI_TRANS_VACTIVE(port));
adjusted_mode->crtc_vtotal =
I915_READ(BXT_MIPI_TRANS_VTOTAL(port));
/*
* TODO: Retrieve hfp, hsync and hbp. Adjust them for dual link and
* calculate hsync_start, hsync_end, htotal and hblank_end
*/
/* vertical values are in terms of lines */
vfp = I915_READ(MIPI_VFP_COUNT(port));
vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port));
vbp = I915_READ(MIPI_VBP_COUNT(port));
adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
adjusted_mode->crtc_vsync_start =
vfp + adjusted_mode->crtc_vdisplay;
adjusted_mode->crtc_vsync_end =
vsync + adjusted_mode->crtc_vsync_start;
adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
}
static void intel_dsi_get_config(struct intel_encoder *encoder, static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config) struct intel_crtc_state *pipe_config)
{ {
struct drm_device *dev = encoder->base.dev;
u32 pclk; u32 pclk;
DRM_DEBUG_KMS("\n"); DRM_DEBUG_KMS("\n");
pipe_config->has_dsi_encoder = true; pipe_config->has_dsi_encoder = true;
if (IS_BROXTON(dev))
bxt_dsi_get_pipe_config(encoder, pipe_config);
/* /*
* DPLL_MD is not used in case of DSI, reading will get some default value * DPLL_MD is not used in case of DSI, reading will get some default value
* set dpll_md = 0 * set dpll_md = 0
......
...@@ -134,5 +134,6 @@ extern void intel_dsi_reset_clocks(struct intel_encoder *encoder, ...@@ -134,5 +134,6 @@ extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port); enum port port);
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id); struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
#endif /* _INTEL_DSI_H */ #endif /* _INTEL_DSI_H */
...@@ -58,50 +58,41 @@ static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel) ...@@ -58,50 +58,41 @@ static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel)
#define NS_KHZ_RATIO 1000000 #define NS_KHZ_RATIO 1000000
#define GPI0_NC_0_HV_DDI0_HPD 0x4130 /* base offsets for gpio pads */
#define GPIO_NC_0_HV_DDI0_PAD 0x4138 #define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130
#define GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120 #define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
#define GPIO_NC_1_HV_DDI0_DDC_SDA_PAD 0x4128 #define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
#define GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110 #define VLV_GPIO_NC_3_PANEL0_VDDEN 0x4140
#define GPIO_NC_2_HV_DDI0_DDC_SCL_PAD 0x4118 #define VLV_GPIO_NC_4_PANEL0_BKLTEN 0x4150
#define GPIO_NC_3_PANEL0_VDDEN 0x4140 #define VLV_GPIO_NC_5_PANEL0_BKLTCTL 0x4160
#define GPIO_NC_3_PANEL0_VDDEN_PAD 0x4148 #define VLV_GPIO_NC_6_HV_DDI1_HPD 0x4180
#define GPIO_NC_4_PANEL0_BLKEN 0x4150 #define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA 0x4190
#define GPIO_NC_4_PANEL0_BLKEN_PAD 0x4158 #define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL 0x4170
#define GPIO_NC_5_PANEL0_BLKCTL 0x4160 #define VLV_GPIO_NC_9_PANEL1_VDDEN 0x4100
#define GPIO_NC_5_PANEL0_BLKCTL_PAD 0x4168 #define VLV_GPIO_NC_10_PANEL1_BKLTEN 0x40E0
#define GPIO_NC_6_PCONF0 0x4180 #define VLV_GPIO_NC_11_PANEL1_BKLTCTL 0x40F0
#define GPIO_NC_6_PAD 0x4188
#define GPIO_NC_7_PCONF0 0x4190 #define VLV_GPIO_PCONF0(base_offset) (base_offset)
#define GPIO_NC_7_PAD 0x4198 #define VLV_GPIO_PAD_VAL(base_offset) ((base_offset) + 8)
#define GPIO_NC_8_PCONF0 0x4170
#define GPIO_NC_8_PAD 0x4178 struct gpio_map {
#define GPIO_NC_9_PCONF0 0x4100 u16 base_offset;
#define GPIO_NC_9_PAD 0x4108 bool init;
#define GPIO_NC_10_PCONF0 0x40E0
#define GPIO_NC_10_PAD 0x40E8
#define GPIO_NC_11_PCONF0 0x40F0
#define GPIO_NC_11_PAD 0x40F8
struct gpio_table {
u16 function_reg;
u16 pad_reg;
u8 init;
}; };
static struct gpio_table gtable[] = { static struct gpio_map vlv_gpio_table[] = {
{ GPI0_NC_0_HV_DDI0_HPD, GPIO_NC_0_HV_DDI0_PAD, 0 }, { VLV_GPIO_NC_0_HV_DDI0_HPD },
{ GPIO_NC_1_HV_DDI0_DDC_SDA, GPIO_NC_1_HV_DDI0_DDC_SDA_PAD, 0 }, { VLV_GPIO_NC_1_HV_DDI0_DDC_SDA },
{ GPIO_NC_2_HV_DDI0_DDC_SCL, GPIO_NC_2_HV_DDI0_DDC_SCL_PAD, 0 }, { VLV_GPIO_NC_2_HV_DDI0_DDC_SCL },
{ GPIO_NC_3_PANEL0_VDDEN, GPIO_NC_3_PANEL0_VDDEN_PAD, 0 }, { VLV_GPIO_NC_3_PANEL0_VDDEN },
{ GPIO_NC_4_PANEL0_BLKEN, GPIO_NC_4_PANEL0_BLKEN_PAD, 0 }, { VLV_GPIO_NC_4_PANEL0_BKLTEN },
{ GPIO_NC_5_PANEL0_BLKCTL, GPIO_NC_5_PANEL0_BLKCTL_PAD, 0 }, { VLV_GPIO_NC_5_PANEL0_BKLTCTL },
{ GPIO_NC_6_PCONF0, GPIO_NC_6_PAD, 0 }, { VLV_GPIO_NC_6_HV_DDI1_HPD },
{ GPIO_NC_7_PCONF0, GPIO_NC_7_PAD, 0 }, { VLV_GPIO_NC_7_HV_DDI1_DDC_SDA },
{ GPIO_NC_8_PCONF0, GPIO_NC_8_PAD, 0 }, { VLV_GPIO_NC_8_HV_DDI1_DDC_SCL },
{ GPIO_NC_9_PCONF0, GPIO_NC_9_PAD, 0 }, { VLV_GPIO_NC_9_PANEL1_VDDEN },
{ GPIO_NC_10_PCONF0, GPIO_NC_10_PAD, 0}, { VLV_GPIO_NC_10_PANEL1_BKLTEN },
{ GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0} { VLV_GPIO_NC_11_PANEL1_BKLTCTL },
}; };
static inline enum port intel_dsi_seq_port_to_port(u8 port) static inline enum port intel_dsi_seq_port_to_port(u8 port)
...@@ -196,56 +187,76 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data) ...@@ -196,56 +187,76 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
return data; return data;
} }
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
u8 gpio_source, u8 gpio_index, bool value)
{ {
u8 gpio, action; struct gpio_map *map;
u16 function, pad; u16 pconf0, padval;
u32 val; u32 tmp;
struct drm_device *dev = intel_dsi->base.base.dev; u8 port;
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->vbt.dsi.seq_version >= 3)
data++;
gpio = *data++; if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
DRM_DEBUG_KMS("unknown gpio index %u\n", gpio_index);
/* pull up/down */ return;
action = *data++ & 1;
if (gpio >= ARRAY_SIZE(gtable)) {
DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
goto out;
} }
if (!IS_VALLEYVIEW(dev_priv)) { map = &vlv_gpio_table[gpio_index];
DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
goto out;
}
if (dev_priv->vbt.dsi.seq_version >= 3) { if (dev_priv->vbt.dsi.seq_version >= 3) {
DRM_DEBUG_KMS("GPIO element v3 not supported\n"); DRM_DEBUG_KMS("GPIO element v3 not supported\n");
goto out; return;
} else {
if (gpio_source == 0) {
port = IOSF_PORT_GPIO_NC;
} else if (gpio_source == 1) {
port = IOSF_PORT_GPIO_SC;
} else {
DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
return;
}
} }
function = gtable[gpio].function_reg; pconf0 = VLV_GPIO_PCONF0(map->base_offset);
pad = gtable[gpio].pad_reg; padval = VLV_GPIO_PAD_VAL(map->base_offset);
mutex_lock(&dev_priv->sb_lock); mutex_lock(&dev_priv->sb_lock);
if (!gtable[gpio].init) { if (!map->init) {
/* program the function */
/* FIXME: remove constant below */ /* FIXME: remove constant below */
vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function, vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
0x2000CC00); map->init = true;
gtable[gpio].init = 1;
} }
val = 0x4 | action; tmp = 0x4 | value;
vlv_iosf_sb_write(dev_priv, port, padval, tmp);
mutex_unlock(&dev_priv->sb_lock);
}
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u8 gpio_source, gpio_index;
bool value;
if (dev_priv->vbt.dsi.seq_version >= 3)
data++;
gpio_index = *data++;
/* gpio source in sequence v2 only */
if (dev_priv->vbt.dsi.seq_version == 2)
gpio_source = (*data >> 1) & 3;
else
gpio_source = 0;
/* pull up/down */ /* pull up/down */
vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val); value = *data++ & 1;
mutex_unlock(&dev_priv->sb_lock);
if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
else
DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
out:
return data; return data;
} }
...@@ -412,25 +423,6 @@ static const struct drm_panel_funcs vbt_panel_funcs = { ...@@ -412,25 +423,6 @@ static const struct drm_panel_funcs vbt_panel_funcs = {
.get_modes = vbt_panel_get_modes, .get_modes = vbt_panel_get_modes,
}; };
/* XXX: This should be done when parsing the VBT in intel_bios.c */
static enum mipi_dsi_pixel_format pixel_format_from_vbt(u32 fmt)
{
/* It just so happens the VBT matches register contents. */
switch (fmt) {
case VID_MODE_FORMAT_RGB888:
return MIPI_DSI_FMT_RGB888;
case VID_MODE_FORMAT_RGB666:
return MIPI_DSI_FMT_RGB666;
case VID_MODE_FORMAT_RGB666_PACKED:
return MIPI_DSI_FMT_RGB666_PACKED;
case VID_MODE_FORMAT_RGB565:
return MIPI_DSI_FMT_RGB565;
default:
MISSING_CASE(fmt);
return MIPI_DSI_FMT_RGB666;
}
}
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
{ {
struct drm_device *dev = intel_dsi->base.base.dev; struct drm_device *dev = intel_dsi->base.base.dev;
...@@ -455,7 +447,9 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) ...@@ -455,7 +447,9 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1; intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0; intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1; intel_dsi->lane_count = mipi_config->lane_cnt + 1;
intel_dsi->pixel_format = pixel_format_from_vbt(mipi_config->videomode_color_format << 7); intel_dsi->pixel_format =
pixel_format_from_register_bits(
mipi_config->videomode_color_format << 7);
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
intel_dsi->dual_link = mipi_config->dual_link; intel_dsi->dual_link = mipi_config->dual_link;
......
...@@ -506,6 +506,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv, ...@@ -506,6 +506,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
int size, int size,
int fb_cpp) int fb_cpp)
{ {
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int compression_threshold = 1; int compression_threshold = 1;
int ret; int ret;
u64 end; u64 end;
...@@ -516,9 +517,9 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv, ...@@ -516,9 +517,9 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
* underruns, even if that range is not reserved by the BIOS. */ * underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(dev_priv) || if (IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
end = dev_priv->ggtt.stolen_size - 8 * 1024 * 1024; end = ggtt->stolen_size - 8 * 1024 * 1024;
else else
end = dev_priv->ggtt.stolen_usable_size; end = ggtt->stolen_usable_size;
/* HACK: This code depends on what we will do in *_enable_fbc. If that /* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well. * code changes, this code needs to change as well.
......
...@@ -122,6 +122,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, ...@@ -122,6 +122,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_framebuffer *fb; struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev; struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_mode_fb_cmd2 mode_cmd = {}; struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj = NULL; struct drm_i915_gem_object *obj = NULL;
int size, ret; int size, ret;
...@@ -146,7 +147,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, ...@@ -146,7 +147,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
/* If the FB is too big, just don't use it since fbdev is not very /* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other * important and we should probably use that space with FBC or other
* features. */ * features. */
if (size * 2 < dev_priv->ggtt.stolen_usable_size) if (size * 2 < ggtt->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev, size); obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL) if (obj == NULL)
obj = i915_gem_alloc_object(dev, size); obj = i915_gem_alloc_object(dev, size);
...@@ -181,7 +182,8 @@ static int intelfb_create(struct drm_fb_helper *helper, ...@@ -181,7 +182,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
container_of(helper, struct intel_fbdev, helper); container_of(helper, struct intel_fbdev, helper);
struct intel_framebuffer *intel_fb = ifbdev->fb; struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev; struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct fb_info *info; struct fb_info *info;
struct drm_framebuffer *fb; struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
...@@ -244,13 +246,13 @@ static int intelfb_create(struct drm_fb_helper *helper, ...@@ -244,13 +246,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* setup aperture base/size for vesafb takeover */ /* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->ggtt.mappable_end; info->apertures->ranges[0].size = ggtt->mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj); info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size; info->fix.smem_len = size;
info->screen_base = info->screen_base =
ioremap_wc(dev_priv->ggtt.mappable_base + i915_gem_obj_ggtt_offset(obj), ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
size); size);
if (!info->screen_base) { if (!info->screen_base) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n"); DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
...@@ -808,8 +810,6 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous ...@@ -808,8 +810,6 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
void intel_fbdev_output_poll_changed(struct drm_device *dev) void intel_fbdev_output_poll_changed(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
async_synchronize_full();
if (dev_priv->fbdev) if (dev_priv->fbdev)
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
} }
...@@ -821,7 +821,6 @@ void intel_fbdev_restore_mode(struct drm_device *dev) ...@@ -821,7 +821,6 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
struct intel_fbdev *ifbdev = dev_priv->fbdev; struct intel_fbdev *ifbdev = dev_priv->fbdev;
struct drm_fb_helper *fb_helper; struct drm_fb_helper *fb_helper;
async_synchronize_full();
if (!ifbdev) if (!ifbdev)
return; return;
......
...@@ -333,7 +333,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, ...@@ -333,7 +333,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
old = !intel_crtc->pch_fifo_underrun_disabled; old = !intel_crtc->pch_fifo_underrun_disabled;
intel_crtc->pch_fifo_underrun_disabled = !enable; intel_crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev_priv->dev)) if (HAS_PCH_IBX(dev_priv))
ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
enable); enable);
else else
...@@ -363,7 +363,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, ...@@ -363,7 +363,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
return; return;
/* GMCH can't disable fifo underruns, filter them. */ /* GMCH can't disable fifo underruns, filter them. */
if (HAS_GMCH_DISPLAY(dev_priv->dev) && if (HAS_GMCH_DISPLAY(dev_priv) &&
to_intel_crtc(crtc)->cpu_fifo_underrun_disabled) to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
return; return;
......
...@@ -353,6 +353,24 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv) ...@@ -353,6 +353,24 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
return ret; return ret;
} }
static int i915_reset_guc(struct drm_i915_private *dev_priv)
{
int ret;
u32 guc_status;
ret = intel_guc_reset(dev_priv);
if (ret) {
DRM_ERROR("GuC reset failed, ret = %d\n", ret);
return ret;
}
guc_status = I915_READ(GUC_STATUS);
WARN(!(guc_status & GS_MIA_IN_RESET),
"GuC status: 0x%x, MIA core expected to be in reset\n", guc_status);
return ret;
}
/** /**
* intel_guc_ucode_load() - load GuC uCode into the device * intel_guc_ucode_load() - load GuC uCode into the device
* @dev: drm device * @dev: drm device
...@@ -369,7 +387,7 @@ int intel_guc_ucode_load(struct drm_device *dev) ...@@ -369,7 +387,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
int err = 0; int retries, err = 0;
if (!i915.enable_guc_submission) if (!i915.enable_guc_submission)
return 0; return 0;
...@@ -417,9 +435,33 @@ int intel_guc_ucode_load(struct drm_device *dev) ...@@ -417,9 +435,33 @@ int intel_guc_ucode_load(struct drm_device *dev)
if (err) if (err)
goto fail; goto fail;
err = guc_ucode_xfer(dev_priv); /*
if (err) * WaEnableuKernelHeaderValidFix:skl,bxt
goto fail; * For BXT, this is only upto B0 but below WA is required for later
* steppings also so this is extended as well.
*/
/* WaEnableGuCBootHashCheckNotSet:skl,bxt */
for (retries = 3; ; ) {
/*
* Always reset the GuC just before (re)loading, so
* that the state and timing are fairly predictable
*/
err = i915_reset_guc(dev_priv);
if (err) {
DRM_ERROR("GuC reset failed, err %d\n", err);
goto fail;
}
err = guc_ucode_xfer(dev_priv);
if (!err)
break;
if (--retries == 0)
goto fail;
DRM_INFO("GuC fw load failed, err %d; will reset and "
"retry %d more time(s)\n", err, retries);
}
guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS; guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
...@@ -440,6 +482,7 @@ int intel_guc_ucode_load(struct drm_device *dev) ...@@ -440,6 +482,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
return 0; return 0;
fail: fail:
DRM_ERROR("GuC firmware load failed, err %d\n", err);
if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING) if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL; guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
......
...@@ -638,7 +638,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) ...@@ -638,7 +638,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder); reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
else if (HAS_PCH_SPLIT(dev_priv->dev)) else if (HAS_PCH_SPLIT(dev_priv))
reg = TVIDEO_DIP_GCP(crtc->pipe); reg = TVIDEO_DIP_GCP(crtc->pipe);
else else
return false; return false;
......
此差异已折叠。
此差异已折叠。
...@@ -118,7 +118,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, ...@@ -118,7 +118,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args, struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas); struct list_head *vmas);
void intel_lrc_irq_handler(struct intel_engine_cs *engine);
void intel_execlists_retire_requests(struct intel_engine_cs *engine); void intel_execlists_retire_requests(struct intel_engine_cs *engine);
#endif /* _INTEL_LRC_H_ */ #endif /* _INTEL_LRC_H_ */
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册