diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6c60e04fc09c75281e0f604fe4ff9bd85ffb3192..ddc21d4b388d2419a63fbd4f4a5745deea80957b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3488,7 +3488,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	if (flags & PIN_MAPPABLE)
 		end = min_t(u64, end, dev_priv->gtt.mappable_end);
 	if (flags & PIN_ZONE_4G)
-		end = min_t(u64, end, (1ULL << 32));
+		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
 
 	if (alignment == 0)
 		alignment = flags & PIN_MAPPABLE ? fence_alignment :
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 900ffd044db80049c47d0e976e59d406466de0c7..c25083c78ba7c21c7cdc92ed43f62c7e499851bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -347,6 +347,10 @@ void i915_gem_context_reset(struct drm_device *dev)
 			i915_gem_context_unreference(lctx);
 			ring->last_context = NULL;
 		}
+
+		/* Force the GPU state to be reinitialised on enabling */
+		if (ring->default_context)
+			ring->default_context->legacy_hw_ctx.initialized = false;
 	}
 }
 
@@ -715,7 +719,7 @@ static int do_switch(struct drm_i915_gem_request *req)
 	if (ret)
 		goto unpin_out;
 
-	if (!to->legacy_hw_ctx.initialized) {
+	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
 		hw_flags |= MI_RESTORE_INHIBIT;
 		/* NB: If we inhibit the restore, the context is not allowed to
 		 * die because future work may end up depending on valid address
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 5d01ea680dc1ed3ba416b42f235c2296cab0628f..dccb517361b3f1763e3241f9c87f04c58ec4d7fc 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -249,6 +249,31 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 		obj->cache_level != I915_CACHE_NONE);
 }
 
+/* Used to convert any address to canonical form.
+ * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
+ * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
+ * addresses to be in a canonical form:
+ * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
+ * canonical form [63:48] == [47]."
+ */
+#define GEN8_HIGH_ADDRESS_BIT 47
+static inline uint64_t gen8_canonical_addr(uint64_t address)
+{
+	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
+}
+
+static inline uint64_t gen8_noncanonical_addr(uint64_t address)
+{
+	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
+}
+
+static inline uint64_t
+relocation_target(struct drm_i915_gem_relocation_entry *reloc,
+		  uint64_t target_offset)
+{
+	return gen8_canonical_addr((int)reloc->delta + target_offset);
+}
+
 static int
 relocate_entry_cpu(struct drm_i915_gem_object *obj,
 		   struct drm_i915_gem_relocation_entry *reloc,
@@ -256,7 +281,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
-	uint64_t delta = reloc->delta + target_offset;
+	uint64_t delta = relocation_target(reloc, target_offset);
 	char *vaddr;
 	int ret;
 
@@ -292,7 +317,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint64_t delta = reloc->delta + target_offset;
+	uint64_t delta = relocation_target(reloc, target_offset);
 	uint64_t offset;
 	void __iomem *reloc_page;
 	int ret;
@@ -347,7 +372,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	uint32_t page_offset = offset_in_page(reloc->offset);
-	uint64_t delta = (int)reloc->delta + target_offset;
+	uint64_t delta = relocation_target(reloc, target_offset);
 	char *vaddr;
 	int ret;
 
@@ -395,7 +420,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	target_i915_obj = target_vma->obj;
 	target_obj = &target_vma->obj->base;
 
-	target_offset = target_vma->node.start;
+	target_offset = gen8_canonical_addr(target_vma->node.start);
 
 	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
 	 * pipe_control writes because the gpu doesn't properly redirect them
@@ -994,6 +1019,21 @@ validate_exec_list(struct drm_device *dev,
 		if (exec[i].flags & invalid_flags)
 			return -EINVAL;
 
+		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
+		 * any non-page-aligned or non-canonical addresses.
+		 */
+		if (exec[i].flags & EXEC_OBJECT_PINNED) {
+			if (exec[i].offset !=
+			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
+				return -EINVAL;
+
+			/* From drm_mm perspective address space is continuous,
+			 * so from this point we're always using non-canonical
+			 * form internally.
+			 */
+			exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
+		}
+
 		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
 			return -EINVAL;
 
@@ -1687,6 +1727,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		/* Copy the new buffer offsets back to the user's exec list.
 		 */
 		for (i = 0; i < args->buffer_count; i++) {
+			exec2_list[i].offset =
+				gen8_canonical_addr(exec2_list[i].offset);
 			ret = __copy_to_user(&user_exec_list[i].offset,
 					     &exec2_list[i].offset,
 					     sizeof(user_exec_list[i].offset));
@@ -1752,6 +1794,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		int i;
 
 		for (i = 0; i < args->buffer_count; i++) {
+			exec2_list[i].offset =
+				gen8_canonical_addr(exec2_list[i].offset);
 			ret = __copy_to_user(&user_exec_list[i].offset,
 					     &exec2_list[i].offset,
 					     sizeof(user_exec_list[i].offset));
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 52bc6c3dfe04d8de03132032129aec2105ee57f2..56f4f2e58d53be25f3024ad8e6c259a1e6a44bfb 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2384,6 +2384,32 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
+struct insert_entries {
+	struct i915_address_space *vm;
+	struct sg_table *st;
+	uint64_t start;
+	enum i915_cache_level level;
+	u32 flags;
+};
+
+static int gen8_ggtt_insert_entries__cb(void *_arg)
+{
+	struct insert_entries *arg = _arg;
+	gen8_ggtt_insert_entries(arg->vm, arg->st,
+				 arg->start, arg->level, arg->flags);
+	return 0;
+}
+
+static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+					  struct sg_table *st,
+					  uint64_t start,
+					  enum i915_cache_level level,
+					  u32 flags)
+{
+	struct insert_entries arg = { vm, st, start, level, flags };
+	stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
+}
+
 /*
  * Binds an object into the global gtt with the specified cache level. The object
  * will be accessible to the GPU via commands whose operands reference offsets
@@ -2560,26 +2586,6 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	return 0;
 }
 
-struct ggtt_bind_vma__cb {
-	struct i915_vma *vma;
-	enum i915_cache_level cache_level;
-	u32 flags;
-};
-
-static int ggtt_bind_vma__cb(void *_arg)
-{
-	struct ggtt_bind_vma__cb *arg = _arg;
-	return ggtt_bind_vma(arg->vma, arg->cache_level, arg->flags);
-}
-
-static int ggtt_bind_vma__BKL(struct i915_vma *vma,
-			      enum i915_cache_level cache_level,
-			      u32 flags)
-{
-	struct ggtt_bind_vma__cb arg = { vma, cache_level, flags };
-	return stop_machine(ggtt_bind_vma__cb, &arg, NULL);
-}
-
 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 				 enum i915_cache_level cache_level,
 				 u32 flags)
@@ -3048,8 +3054,8 @@ static int gen8_gmch_probe(struct drm_device *dev,
 	dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
 	dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
 
-	if (IS_CHERRYVIEW(dev))
-		dev_priv->gtt.base.bind_vma = ggtt_bind_vma__BKL;
+	if (IS_CHERRYVIEW(dev_priv))
+		dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3f8c753997bafa2adc843232afa747751e05bf0d..fa8afa7860ae175753b501622d9295f1475a6c96 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2414,9 +2414,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
 				spt_irq_handler(dev, pch_iir);
 			else
 				cpt_irq_handler(dev, pch_iir);
-		} else
-			DRM_ERROR("The master control interrupt lied (SDE)!\n");
-
+		} else {
+			/*
+			 * Like on previous PCH there seems to be something
+			 * fishy going on with forwarding PCH interrupts.
+			 */
+			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
+		}
 	}
 
 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ceaea7a3641a2aa2eb61bb5ee9a2cc639559e6b3..580d094bfc1e1fc7426a937b721b29062ac9fe8e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13763,13 +13763,15 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 
 	/* For framebuffer backed by dmabuf, wait for fence */
 	if (obj && obj->base.dma_buf) {
-		ret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
-							  false, true,
-							  MAX_SCHEDULE_TIMEOUT);
-		if (ret == -ERESTARTSYS)
-			return ret;
+		long lret;
+
+		lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
+							   false, true,
+							   MAX_SCHEDULE_TIMEOUT);
+		if (lret == -ERESTARTSYS)
+			return lret;
 
-		WARN_ON(ret < 0);
+		WARN(lret < 0, "waiting returns %li\n", lret);
 	}
 
 	if (!obj) {
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d523ebb2f89dbac35120ce0ee78be868f19b355f..ea5415851c6e2cc36d4c6790db473fab637fd46d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1442,8 +1442,10 @@ static inline void
 assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
 {
 	assert_rpm_device_not_suspended(dev_priv);
-	WARN_ONCE(!atomic_read(&dev_priv->pm.wakeref_count),
-		  "RPM wakelock ref not held during HW access");
+	/* FIXME: Needs to be converted back to WARN_ONCE, but currently causes
+	 * too much noise. */
+	if (!atomic_read(&dev_priv->pm.wakeref_count))
+		DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
 }
 
 static inline int
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index a294a3cbaea1454dc3aab271d3ec45a350ef0520..bee673005d48a6f3cb2e97856adfde114ae519a2 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -468,9 +468,14 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
 	list_for_each_entry(connector, &mode_config->connector_list, head) {
 		struct intel_connector *intel_connector = to_intel_connector(connector);
 		connector->polled = intel_connector->polled;
-		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
-			connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+		/* MST has a dynamic intel_connector->encoder and it's reprobing
+		 * is all handled by the MST helpers. */
 		if (intel_connector->mst_port)
+			continue;
+
+		if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
+		    intel_connector->encoder->hpd_pin > HPD_NONE)
 			connector->polled = DRM_CONNECTOR_POLL_HPD;
 	}
 
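
Note (not part of the patch): below is a minimal standalone sketch of the bit-47
sign-extension rule that gen8_canonical_addr()/gen8_noncanonical_addr() implement
in the i915_gem_execbuffer.c hunk above, and of the validate_exec_list()-style
check on EXEC_OBJECT_PINNED offsets. The kernel helpers use sign_extend64(); this
sketch open-codes the same shift trick so it compiles on its own. The sample
addresses and the hard-coded 4 KiB page mask are illustrative assumptions only.

/* Standalone illustration only -- not kernel code. */
#include <assert.h>
#include <stdint.h>

#define GEN8_HIGH_ADDRESS_BIT 47

/* Same shift trick as the kernel's sign_extend64(value, 47):
 * replicate bit 47 into bits [63:48]. */
static uint64_t canonical_addr(uint64_t address)
{
	unsigned int shift = 63 - GEN8_HIGH_ADDRESS_BIT;

	return (uint64_t)((int64_t)(address << shift) >> shift);
}

/* Mask off bits [63:48]; this is the form drm_mm works with internally. */
static uint64_t noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}

int main(void)
{
	/* Below 2^47 the address is already canonical and is unchanged. */
	assert(canonical_addr(0x0000100000001000ULL) == 0x0000100000001000ULL);

	/* With bit 47 set, bits [63:48] must be copies of bit 47. */
	assert(canonical_addr(0x0000800000000000ULL) == 0xffff800000000000ULL);

	/* Round trip back to the non-canonical form used internally. */
	assert(noncanonical_addr(0xffff800000000000ULL) == 0x0000800000000000ULL);

	/* A pinned offset presented in the wrong form fails a
	 * validate_exec_list()-style check, assuming 4 KiB pages:
	 * offset == canonical_addr(offset & page_mask). */
	assert(0x0000800000000000ULL !=
	       canonical_addr(0x0000800000000000ULL & ~0xfffULL));

	return 0;
}

This also shows why the execbuffer paths convert offsets back with
gen8_canonical_addr() before __copy_to_user(): the value handed back to userspace
must be directly reusable as an EXEC_OBJECT_PINNED input, which the patch requires
to be page-aligned and canonical.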