Commit 15717de2 authored by Chris Wilson

drm/i915: Count how many VMA are bound for an object

Since we may have VMA allocated for an object whose binding was
interrupted, there is a disparity between having elements on the
obj->vma_list and being bound. i915_gem_obj_bound_any() does this check,
but it is not rigorously observed - add an explicit count to make it easier.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-7-git-send-email-chris@chris-wilson.co.uk
Parent 2bfa996e
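In short, the patch replaces list walks such as i915_gem_obj_bound_any() with a per-object counter: binding increments it, unbinding decrements it, and the object moves to the global unbound list once the count drops to zero. Below is a minimal standalone sketch of that invariant; the types and helper names (struct object, vma_bind, vma_unbind) are simplified stand-ins for illustration, not the driver's actual structures.

#include <assert.h>

/* Simplified stand-in for drm_i915_gem_object (illustrative only). */
struct object {
	unsigned int bind_count; /* number of VMAs actually bound */
};

static void vma_bind(struct object *obj)
{
	/* ... allocate and insert the VMA into its address space ... */
	obj->bind_count++; /* mirrors the increment in i915_gem_object_bind_to_vm() */
}

static void vma_unbind(struct object *obj)
{
	assert(obj->bind_count > 0); /* mirrors GEM_BUG_ON(obj->bind_count == 0) */
	if (--obj->bind_count == 0) {
		/* last binding gone: the object may move to the unbound list */
	}
}

With the counter in place, "is this object bound anywhere?" becomes an O(1) read of obj->bind_count instead of an O(n) walk of obj->vma_list.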
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -174,6 +174,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!drm_mm_node_allocated(&vma->node))
+			continue;
+
 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
			   vma->is_ggtt ? "g" : "pp",
			   vma->node.start, vma->node.size);
@@ -335,11 +338,11 @@ static int per_file_stats(int id, void *ptr, void *data)
 	struct drm_i915_gem_object *obj = ptr;
 	struct file_stats *stats = data;
 	struct i915_vma *vma;
-	int bound = 0;
 
 	stats->count++;
 	stats->total += obj->base.size;
-
+	if (!obj->bind_count)
+		stats->unbound += obj->base.size;
 	if (obj->base.name || obj->base.dma_buf)
 		stats->shared += obj->base.size;
 
@@ -347,8 +350,6 @@ static int per_file_stats(int id, void *ptr, void *data)
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
-		bound++;
-
 		if (vma->is_ggtt) {
 			stats->global += vma->node.size;
 		} else {
@@ -364,9 +365,6 @@ static int per_file_stats(int id, void *ptr, void *data)
 			stats->inactive += vma->node.size;
 	}
 
-	if (!bound)
-		stats->unbound += obj->base.size;
-
 	return 0;
 }
 
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2221,6 +2221,8 @@ struct drm_i915_gem_object {
 	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
 
 	unsigned int has_wc_mmap;
+	/** Count of VMA actually bound by this object */
+	unsigned int bind_count;
 	unsigned int pin_display;
 
 	struct sg_table *pages;
@@ -3266,7 +3268,6 @@ i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
 	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
 }
 
-bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view);
 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2107,7 +2107,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	if (obj->pages_pin_count)
 		return -EBUSY;
 
-	BUG_ON(i915_gem_obj_bound_any(obj));
+	GEM_BUG_ON(obj->bind_count);
 
 	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
@@ -2965,7 +2965,6 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	int ret;
 
 	if (list_empty(&vma->obj_link))
@@ -2979,7 +2978,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 	if (vma->pin_count)
 		return -EBUSY;
 
-	BUG_ON(obj->pages == NULL);
+	GEM_BUG_ON(obj->bind_count == 0);
+	GEM_BUG_ON(!obj->pages);
 
 	if (wait) {
 		ret = i915_gem_object_wait_rendering(obj, false);
@@ -3019,8 +3019,9 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 
 	/* Since the unbound list is global, only move to that list if
	 * no more VMAs exist. */
-	if (list_empty(&obj->vma_list))
-		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+	if (--obj->bind_count == 0)
+		list_move_tail(&obj->global_list,
+			       &to_i915(obj->base.dev)->mm.unbound_list);
 
 	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
@@ -3255,6 +3256,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&vma->vm_link, &vm->inactive_list);
+	obj->bind_count++;
 
 	return vma;
@@ -3450,7 +3452,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	struct i915_vma *vma, *next;
-	bool bound = false;
 	int ret = 0;
 
 	if (obj->cache_level == cache_level)
@@ -3474,8 +3475,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			ret = i915_vma_unbind(vma);
 			if (ret)
				return ret;
-		} else
-			bound = true;
+		}
 	}
 
 	/* We can reuse the existing drm_mm nodes but need to change the
@@ -3485,7 +3485,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
	 * rewrite the PTE in the belief that doing so tramples upon less
	 * state and so involves less work.
	 */
-	if (bound) {
+	if (obj->bind_count) {
 		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
@@ -4223,6 +4223,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 			dev_priv->mm.interruptible = was_interruptible;
 		}
 	}
+	GEM_BUG_ON(obj->bind_count);
 
 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
	 * before progressing. */
@@ -4840,17 +4841,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 	return false;
 }
 
-bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (drm_mm_node_allocated(&vma->node))
-			return true;
-
-	return false;
-}
-
 unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
 {
 	struct i915_vma *vma;
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -48,19 +48,15 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }
 
-static int num_vma_bound(struct drm_i915_gem_object *obj)
+static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
-	int count = 0;
 
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (drm_mm_node_allocated(&vma->node))
-			count++;
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		if (vma->pin_count)
-			count++;
-	}
+			return true;
 
-	return count;
+	return false;
 }
 
 static bool swap_available(void)
@@ -82,7 +78,10 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
	 * to the GPU, simply unbinding from the GPU is not going to succeed
	 * in releasing our pin count on the pages themselves.
	 */
-	if (obj->pages_pin_count != num_vma_bound(obj))
+	if (obj->pages_pin_count > obj->bind_count)
+		return false;
+
+	if (any_vma_pinned(obj))
 		return false;
 
 	/* We can only return physical pages to the system if we can either
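The shrinker hunk above is the subtlest part of the patch: the old helper counted each bound VMA once and each pinned VMA once more, and required pages_pin_count to match that total exactly, while the new code checks the two conditions separately against obj->bind_count. A hedged sketch of the resulting logic, using simplified standalone types (struct obj_state and can_release_pages_sketch are illustrative names, not the kernel's):

#include <stdbool.h>

/* Illustrative only: mirrors the shape of can_release_pages() after
 * this patch; not the driver's actual structures. */
struct obj_state {
	unsigned int pages_pin_count; /* pins held on the backing pages */
	unsigned int bind_count;      /* bound VMAs; each holds one page pin */
	bool any_vma_pinned;          /* true if any VMA is itself pinned */
};

static bool can_release_pages_sketch(const struct obj_state *o)
{
	/* Any page pin beyond those owned by the bindings comes from
	 * elsewhere, so unbinding alone cannot release the pages. */
	if (o->pages_pin_count > o->bind_count)
		return false;

	/* A pinned VMA cannot be unbound at all. */
	if (o->any_vma_pinned)
		return false;

	return true;
}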
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -708,6 +708,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	vma->bound |= GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
 	list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
+	obj->bind_count++;
 
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	i915_gem_object_pin_pages(obj);