commit a65adaf8 authored by Chris Wilson

drm/i915: Track user GTT faulting per-vma

We don't wish to refault the entire object (other vma) when unbinding
one partial vma. To do this track which vma have been faulted into the
user's address space.

v2: Use a local vma_offset to tidy up a multiline unmap_mapping_range().
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171009084401.29090-3-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Parent 3bd40735
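The bookkeeping this patch introduces is small: each GGTT vma carries a USERFAULT flag bit, the object keeps a userfault_count of how many of its vma are currently faulted into userspace, and the object stays on the global mm.userfault_list only while that count is non-zero. Below is a minimal, self-contained sketch of that scheme in plain userspace C; struct object, struct vma, fault_vma() and revoke_vma() are illustrative stand-ins, not the kernel's types or API.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel types; only the userfault
 * bookkeeping survives. "on_userfault_list" stands in for the object's
 * presence on dev_priv->mm.userfault_list. */
struct object {
	unsigned int userfault_count;
	bool on_userfault_list;
};

struct vma {
	struct object *obj;
	unsigned long flags;
};

#define VMA_USERFAULT (1ul << 11)

/* Mirrors i915_vma_set_userfault(): returns whether the bit was already set. */
static bool vma_set_userfault(struct vma *vma)
{
	bool was_set = vma->flags & VMA_USERFAULT;

	vma->flags |= VMA_USERFAULT;
	return was_set;
}

/* Fault path: the first fault on a vma may also be the first fault on the
 * whole object, which is what adds the object to the global list. */
static void fault_vma(struct vma *vma)
{
	if (!vma_set_userfault(vma) && !vma->obj->userfault_count++)
		vma->obj->on_userfault_list = true;
}

/* Revoke path, as in i915_vma_revoke_mmap(): only the last faulted vma
 * takes the object off the global list. */
static void revoke_vma(struct vma *vma)
{
	if (!(vma->flags & VMA_USERFAULT))
		return;

	vma->flags &= ~VMA_USERFAULT;
	if (!--vma->obj->userfault_count)
		vma->obj->on_userfault_list = false;
}

int main(void)
{
	struct object obj = { 0 };
	struct vma a = { .obj = &obj }, b = { .obj = &obj };

	fault_vma(&a);
	fault_vma(&b);

	revoke_vma(&a);		/* object keeps its other mapping... */
	assert(obj.on_userfault_list);

	revoke_vma(&b);		/* ...and is dropped only on the last revoke */
	assert(!obj.on_userfault_list);
	printf("userfault_count bookkeeping ok\n");
	return 0;
}

Revoking one vma leaves the object's other mappings intact, which is exactly what the commit message asks for; in the real driver the revoke step additionally calls unmap_mapping_range() over just that vma's span of the GTT mmap.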
@@ -98,7 +98,7 @@ static char get_tiling_flag(struct drm_i915_gem_object *obj)
 static char get_global_flag(struct drm_i915_gem_object *obj)
 {
-	return !list_empty(&obj->userfault_link) ? 'g' : ' ';
+	return obj->userfault_count ? 'g' : ' ';
 }
 
 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
@@ -1914,18 +1914,22 @@ int i915_gem_fault(struct vm_fault *vmf)
 	if (ret)
 		goto err_unpin;
 
-	/* Mark as being mmapped into userspace for later revocation */
-	assert_rpm_wakelock_held(dev_priv);
-	if (list_empty(&obj->userfault_link))
-		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
-
 	/* Finally, remap it using the new GTT offset */
 	ret = remap_io_mapping(area,
 			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
 			       (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
 			       min_t(u64, vma->size, area->vm_end - area->vm_start),
 			       &ggtt->mappable);
 	if (ret)
 		goto err_fence;
 
+	/* Mark as being mmapped into userspace for later revocation */
+	assert_rpm_wakelock_held(dev_priv);
+	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
+		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+	GEM_BUG_ON(!obj->userfault_count);
+
 err_fence:
 	i915_vma_unpin_fence(vma);
 err_unpin:
 	__i915_vma_unpin(vma);
@@ -1978,6 +1982,25 @@ int i915_gem_fault(struct vm_fault *vmf)
 	return ret;
 }
 
+static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+
+	GEM_BUG_ON(!obj->userfault_count);
+
+	obj->userfault_count = 0;
+	list_del(&obj->userfault_link);
+	drm_vma_node_unmap(&obj->base.vma_node,
+			   obj->base.dev->anon_inode->i_mapping);
+
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!i915_vma_is_ggtt(vma))
+			break;
+
+		i915_vma_unset_userfault(vma);
+	}
+}
+
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -2008,12 +2031,10 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 	lockdep_assert_held(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (list_empty(&obj->userfault_link))
+	if (!obj->userfault_count)
 		goto out;
 
-	list_del_init(&obj->userfault_link);
-	drm_vma_node_unmap(&obj->base.vma_node,
-			   obj->base.dev->anon_inode->i_mapping);
+	__i915_gem_object_release_mmap(obj);
 
 	/* Ensure that the CPU's PTE are revoked and there are not outstanding
 	 * memory transactions from userspace before we return. The TLB
@@ -2041,11 +2062,8 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
 	 */
 	list_for_each_entry_safe(obj, on,
-				 &dev_priv->mm.userfault_list, userfault_link) {
-		list_del_init(&obj->userfault_link);
-		drm_vma_node_unmap(&obj->base.vma_node,
-				   obj->base.dev->anon_inode->i_mapping);
-	}
+				 &dev_priv->mm.userfault_list, userfault_link)
+		__i915_gem_object_release_mmap(obj);
 
 	/* The fence will be lost when the device powers down. If any were
 	 * in use by hardware (i.e. they are pinned), we should not be powering
@@ -2068,7 +2086,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
 		if (!reg->vma)
 			continue;
 
-		GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
 		reg->dirty = true;
 	}
 }
@@ -4276,7 +4294,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	mutex_init(&obj->mm.lock);
 
 	INIT_LIST_HEAD(&obj->global_link);
-	INIT_LIST_HEAD(&obj->userfault_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 	INIT_LIST_HEAD(&obj->lut_list);
 	INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4457,6 +4474,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 	llist_for_each_entry_safe(obj, on, freed, freed) {
 		GEM_BUG_ON(obj->bind_count);
+		GEM_BUG_ON(obj->userfault_count);
 		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
 		GEM_BUG_ON(!list_empty(&obj->lut_list));
@@ -82,7 +82,7 @@ mark_free(struct drm_mm_scan *scan,
 	if (i915_vma_is_pinned(vma))
 		return false;
 
-	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
+	if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
 		return false;
 
 	list_add(&vma->evict_link, unwind);
@@ -240,7 +240,8 @@ static int fence_update(struct drm_i915_fence_reg *fence,
 		/* Ensure that all userspace CPU access is completed before
 		 * stealing the fence.
 		 */
-		i915_gem_release_mmap(fence->vma->obj);
+		GEM_BUG_ON(fence->vma->fence != fence);
+		i915_vma_revoke_mmap(fence->vma);
 
 		fence->vma->fence = NULL;
 		fence->vma = NULL;
@@ -451,7 +452,7 @@ void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
 		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
 
 		if (fence->vma)
-			i915_gem_release_mmap(fence->vma->obj);
+			i915_vma_revoke_mmap(fence->vma);
 	}
 }
@@ -479,7 +480,7 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
 		 */
 		if (vma && !i915_gem_object_is_tiled(vma->obj)) {
 			GEM_BUG_ON(!reg->dirty);
-			GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));
+			GEM_BUG_ON(i915_vma_has_userfault(vma));
 
 			list_move(&reg->link, &dev_priv->mm.fence_list);
 			vma->fence = NULL;
@@ -123,6 +123,7 @@ struct drm_i915_gem_object {
 	/**
 	 * Whether the object is currently in the GGTT mmap.
 	 */
+	unsigned int userfault_count;
 	struct list_head userfault_link;
 
 	struct list_head batch_pool_link;
@@ -690,6 +690,30 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
 	vma->iomap = NULL;
 }
 
+void i915_vma_revoke_mmap(struct i915_vma *vma)
+{
+	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+	u64 vma_offset;
+
+	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
+
+	if (!i915_vma_has_userfault(vma))
+		return;
+
+	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+	GEM_BUG_ON(!vma->obj->userfault_count);
+
+	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
+			    drm_vma_node_offset_addr(node) + vma_offset,
+			    vma->size,
+			    1);
+
+	i915_vma_unset_userfault(vma);
+	if (!--vma->obj->userfault_count)
+		list_del(&vma->obj->userfault_link);
+}
+
 int i915_vma_unbind(struct i915_vma *vma)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
@@ -753,11 +777,13 @@ int i915_vma_unbind(struct i915_vma *vma)
 			return ret;
 
 		/* Force a pagefault for domain tracking on next user access */
-		i915_gem_release_mmap(obj);
+		i915_vma_revoke_mmap(vma);
 		__i915_vma_iounmap(vma);
 		vma->flags &= ~I915_VMA_CAN_FENCE;
 	}
+	GEM_BUG_ON(vma->fence);
+	GEM_BUG_ON(i915_vma_has_userfault(vma));
 
 	if (likely(!vma->vm->closed)) {
 		trace_i915_vma_unbind(vma);
@@ -66,7 +66,7 @@ struct i915_vma {
 	 * that exist in the ctx->handle_vmas LUT for this vma.
 	 */
 	unsigned int open_count;
-	unsigned int flags;
+	unsigned long flags;
 	/**
 	 * How many users have pinned this object in GTT space. The following
 	 * users can each hold at most one reference: pwrite/pread, execbuffer
@@ -88,6 +88,8 @@ struct i915_vma {
 #define I915_VMA_GGTT		BIT(8)
 #define I915_VMA_CAN_FENCE	BIT(9)
 #define I915_VMA_CLOSED		BIT(10)
+#define I915_VMA_USERFAULT_BIT	11
+#define I915_VMA_USERFAULT	BIT(I915_VMA_USERFAULT_BIT)
 
 	unsigned int active;
 	struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -146,6 +148,22 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
 	return vma->flags & I915_VMA_CLOSED;
 }
 
+static inline bool i915_vma_set_userfault(struct i915_vma *vma)
+{
+	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+	return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
+static inline void i915_vma_unset_userfault(struct i915_vma *vma)
+{
+	return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
+static inline bool i915_vma_has_userfault(const struct i915_vma *vma)
+{
+	return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags);
+}
+
 static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
 {
 	return vma->active;
@@ -244,6 +262,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
 bool i915_vma_misplaced(const struct i915_vma *vma,
 			u64 size, u64 alignment, u64 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
+void i915_vma_revoke_mmap(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 void i915_vma_unlink_ctx(struct i915_vma *vma);
 void i915_vma_close(struct i915_vma *vma);
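Two implementation details of the new accessors are worth calling out. vma->flags widens from unsigned int to unsigned long because the kernel's generic bitop helpers used here (test_bit(), __test_and_set_bit(), __clear_bit()) operate on unsigned long words. And the double-underscore variants are the non-atomic forms; they are safe in this patch only because every path that touches the userfault state runs under struct_mutex, which the lockdep_assert_held() in i915_vma_revoke_mmap() makes explicit.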