diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 167e0bd79f98b9772b59ae645bd98c4e77a52d3f..906f8851df4c7e3d5e2d15e22934748a3831a40e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2645,10 +2645,13 @@ void i915_init_vm(struct drm_i915_private *dev_priv,
 void i915_gem_free_object(struct drm_gem_object *obj);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
-#define PIN_MAPPABLE 0x1
-#define PIN_NONBLOCK 0x2
-#define PIN_GLOBAL 0x4
-#define PIN_OFFSET_BIAS 0x8
+/* Flags used by pin/bind&friends. */
+#define PIN_MAPPABLE	(1<<0)
+#define PIN_NONBLOCK	(1<<1)
+#define PIN_GLOBAL	(1<<2)
+#define PIN_OFFSET_BIAS	(1<<3)
+#define PIN_USER	(1<<4)
+#define PIN_UPDATE	(1<<5)
 #define PIN_OFFSET_MASK (~4095)
 
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b86da6fceb7680d5dc610ca77d4e0220d8a033dc..91aade7c9f6d5d3c7ec1aacb3bcb2033be2bc846 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3589,8 +3589,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 		goto err_remove_node;
 
 	trace_i915_vma_bind(vma, flags);
-	ret = i915_vma_bind(vma, obj->cache_level,
-			    flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
+	ret = i915_vma_bind(vma, obj->cache_level, flags);
 	if (ret)
 		goto err_finish_gtt;
 
@@ -3816,7 +3815,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		list_for_each_entry(vma, &obj->vma_list, vma_link)
 			if (drm_mm_node_allocated(&vma->node)) {
 				ret = i915_vma_bind(vma, cache_level,
-						    vma->bound & GLOBAL_BIND);
+						    PIN_UPDATE);
 				if (ret)
 					return ret;
 			}
@@ -4201,10 +4200,8 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 				 flags);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
-	}
-
-	if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
-		ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
+	} else {
+		ret = i915_vma_bind(vma, obj->cache_level, flags);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7f69aa820458bb2c94e6979c966820edfc0bce46..cfdc8c6073aa87501cc3a8f24be569554e8c5799 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -400,10 +400,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
-		     reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-		     !(target_vma->bound & GLOBAL_BIND))) {
+		     reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
-				    GLOBAL_BIND);
+				    PIN_GLOBAL);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}
@@ -585,7 +584,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
	uint64_t flags;
	int ret;
 
-	flags = 0;
+	flags = PIN_USER;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 4e2caef837725080f678fbcaf79efce215cfc9fd..9e06180e206f5e9e20af2d47915daa598493fd19 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1748,15 +1748,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
			continue;
 
		i915_gem_clflush_object(obj, obj->pin_display);
-		/* The bind_vma code tries to be smart about tracking mappings.
-		 * Unfortunately above, we've just wiped out the mappings
-		 * without telling our object about it. So we need to fake it.
-		 *
-		 * Bind is not expected to fail since this is only called on
-		 * resume and assumption is all requirements exist already.
-		 */
-		vma->bound &= ~GLOBAL_BIND;
-		WARN_ON(i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND));
+		WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
	}
 
@@ -1957,7 +1949,8 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
 
	BUG_ON(!i915_is_ggtt(vma->vm));
	intel_gtt_insert_sg_entries(vma->ggtt_view.pages, entry, flags);
-	vma->bound = GLOBAL_BIND;
+
+	vma->bound |= GLOBAL_BIND;
 }
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -1976,7 +1969,6 @@ static void i915_ggtt_unbind_vma(struct i915_vma *vma)
	const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
 
	BUG_ON(!i915_is_ggtt(vma->vm));
-	vma->bound = 0;
	intel_gtt_clear_range(first, size);
 }
 
@@ -1997,35 +1989,19 @@ static void ggtt_bind_vma(struct i915_vma *vma,
 
	if (i915_is_ggtt(vma->vm))
		pages = vma->ggtt_view.pages;
 
-	/* If there is no aliasing PPGTT, or the caller needs a global mapping,
-	 * or we have a global mapping already but the cacheability flags have
-	 * changed, set the global PTEs.
-	 *
-	 * If there is an aliasing PPGTT it is anecdotally faster, so use that
-	 * instead if none of the above hold true.
-	 *
-	 * NB: A global mapping should only be needed for special regions like
-	 * "gtt mappable", SNB errata, or if specified via special execbuf
-	 * flags. At all other times, the GPU will use the aliasing PPGTT.
-	 */
	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
-		if (!(vma->bound & GLOBAL_BIND) ||
-		    (cache_level != obj->cache_level)) {
-			vma->vm->insert_entries(vma->vm, pages,
-						vma->node.start,
-						cache_level, pte_flags);
-			vma->bound |= GLOBAL_BIND;
-		}
+		vma->vm->insert_entries(vma->vm, pages,
+					vma->node.start,
+					cache_level, pte_flags);
+
+		vma->bound |= GLOBAL_BIND;
	}
 
-	if (dev_priv->mm.aliasing_ppgtt &&
-	    (!(vma->bound & LOCAL_BIND) ||
-	     (cache_level != obj->cache_level))) {
+	if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
		appgtt->base.insert_entries(&appgtt->base, pages,
					    vma->node.start,
					    cache_level, pte_flags);
-		vma->bound |= LOCAL_BIND;
	}
 }
@@ -2040,16 +2016,14 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
				     vma->node.start,
				     obj->base.size,
				     true);
-		vma->bound &= ~GLOBAL_BIND;
	}
 
-	if (vma->bound & LOCAL_BIND) {
+	if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
		appgtt->base.clear_range(&appgtt->base,
					 vma->node.start,
					 obj->base.size,
					 true);
-		vma->bound &= ~LOCAL_BIND;
	}
 }
@@ -2839,6 +2813,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
 {
+	u32 bind_flags = 0;
	int ret;
 
	if (vma->vm->allocate_va_range) {
@@ -2855,12 +2830,24 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags)
 
	if (i915_is_ggtt(vma->vm)) {
		ret = i915_get_ggtt_vma_pages(vma);
		if (ret)
			return ret;
	}
 
-	vma->vm->bind_vma(vma, cache_level, flags);
+	if (flags & PIN_GLOBAL)
+		bind_flags |= GLOBAL_BIND;
+	if (flags & PIN_USER)
+		bind_flags |= LOCAL_BIND;
+
+	if (flags & PIN_UPDATE)
+		bind_flags |= vma->bound;
+	else
+		bind_flags &= ~vma->bound;
+
+	if (bind_flags)
+		vma->vm->bind_vma(vma, cache_level, bind_flags);
+
+	vma->bound |= bind_flags;
 
	return 0;
 }
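
For reference, below is a small standalone sketch (not part of the patch) of the flag translation that i915_vma_bind() performs after this change: PIN_GLOBAL requests the global GTT mapping, PIN_USER the aliasing-ppgtt mapping, PIN_UPDATE forces already-bound address spaces to be rewritten (as used by set_cache_level and resume), and address spaces that are already bound are otherwise skipped. The GLOBAL_BIND/LOCAL_BIND values and the helper name resolve_bind_flags() are assumptions made for the demo only.

/*
 * Standalone userspace sketch of the PIN_* -> bind-flag translation done in
 * i915_vma_bind() in the patch above.  GLOBAL_BIND/LOCAL_BIND values are
 * assumed here; resolve_bind_flags() is a made-up name for illustration.
 */
#include <stdio.h>

#define PIN_GLOBAL	(1 << 2)
#define PIN_USER	(1 << 4)
#define PIN_UPDATE	(1 << 5)

#define GLOBAL_BIND	(1 << 0)	/* assumed value of the vma->bound bit */
#define LOCAL_BIND	(1 << 1)	/* assumed value of the vma->bound bit */

static unsigned int resolve_bind_flags(unsigned int pin_flags, unsigned int bound)
{
	unsigned int bind_flags = 0;

	if (pin_flags & PIN_GLOBAL)
		bind_flags |= GLOBAL_BIND;
	if (pin_flags & PIN_USER)
		bind_flags |= LOCAL_BIND;

	if (pin_flags & PIN_UPDATE)
		bind_flags |= bound;	/* rewrite PTEs that are already bound */
	else
		bind_flags &= ~bound;	/* skip address spaces already bound */

	return bind_flags;
}

int main(void)
{
	/* Fresh vma pinned for GPU use: only the aliasing ppgtt gets bound. */
	printf("%x\n", resolve_bind_flags(PIN_USER, 0));

	/* Already ppgtt-bound, now also needs a global mapping: only GGTT is (re)bound. */
	printf("%x\n", resolve_bind_flags(PIN_USER | PIN_GLOBAL, LOCAL_BIND));

	/* Cache level changed: rewrite whatever is currently bound. */
	printf("%x\n", resolve_bind_flags(PIN_UPDATE, GLOBAL_BIND | LOCAL_BIND));

	return 0;
}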