提交 8d17fb44 编写于 作者: T Thomas Hellstrom

drm/ttm: Allow execbuf util reserves without ticket

If no reservation ticket is given to the execbuf reservation utilities,
try reservation with non-blocking semantics.
This is intended for eviction paths that use the execbuf reservation
utilities for convenience rather than for deadlock avoidance.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
上级 a3483353
...@@ -32,8 +32,7 @@ ...@@ -32,8 +32,7 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/module.h> #include <linux/module.h>
static void ttm_eu_backoff_reservation_locked(struct list_head *list, static void ttm_eu_backoff_reservation_locked(struct list_head *list)
struct ww_acquire_ctx *ticket)
{ {
struct ttm_validate_buffer *entry; struct ttm_validate_buffer *entry;
...@@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, ...@@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
entry = list_first_entry(list, struct ttm_validate_buffer, head); entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob; glob = entry->bo->glob;
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list, ticket); ttm_eu_backoff_reservation_locked(list);
ww_acquire_fini(ticket); if (ticket)
ww_acquire_fini(ticket);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
} }
EXPORT_SYMBOL(ttm_eu_backoff_reservation); EXPORT_SYMBOL(ttm_eu_backoff_reservation);
...@@ -130,7 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, ...@@ -130,7 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
entry = list_first_entry(list, struct ttm_validate_buffer, head); entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob; glob = entry->bo->glob;
ww_acquire_init(ticket, &reservation_ww_class); if (ticket)
ww_acquire_init(ticket, &reservation_ww_class);
retry: retry:
list_for_each_entry(entry, list, head) { list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo; struct ttm_buffer_object *bo = entry->bo;
...@@ -139,16 +140,17 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, ...@@ -139,16 +140,17 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
if (entry->reserved) if (entry->reserved)
continue; continue;
ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket); ticket);
if (ret == -EDEADLK) { if (ret == -EDEADLK) {
/* uh oh, we lost out, drop every reservation and try /* uh oh, we lost out, drop every reservation and try
* to only reserve this buffer, then start over if * to only reserve this buffer, then start over if
* this succeeds. * this succeeds.
*/ */
BUG_ON(ticket == NULL);
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list, ticket); ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list); ttm_eu_list_ref_sub(list);
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
...@@ -175,7 +177,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, ...@@ -175,7 +177,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
} }
} }
ww_acquire_done(ticket); if (ticket)
ww_acquire_done(ticket);
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list); ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
...@@ -184,12 +187,14 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, ...@@ -184,12 +187,14 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
err: err:
spin_lock(&glob->lru_lock); spin_lock(&glob->lru_lock);
ttm_eu_backoff_reservation_locked(list, ticket); ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
ttm_eu_list_ref_sub(list); ttm_eu_list_ref_sub(list);
err_fini: err_fini:
ww_acquire_done(ticket); if (ticket) {
ww_acquire_fini(ticket); ww_acquire_done(ticket);
ww_acquire_fini(ticket);
}
return ret; return ret;
} }
EXPORT_SYMBOL(ttm_eu_reserve_buffers); EXPORT_SYMBOL(ttm_eu_reserve_buffers);
...@@ -224,7 +229,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, ...@@ -224,7 +229,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
} }
spin_unlock(&bdev->fence_lock); spin_unlock(&bdev->fence_lock);
spin_unlock(&glob->lru_lock); spin_unlock(&glob->lru_lock);
ww_acquire_fini(ticket); if (ticket)
ww_acquire_fini(ticket);
list_for_each_entry(entry, list, head) { list_for_each_entry(entry, list, head) {
if (entry->old_sync_obj) if (entry->old_sync_obj)
......
...@@ -70,7 +70,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, ...@@ -70,7 +70,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
/** /**
* function ttm_eu_reserve_buffers * function ttm_eu_reserve_buffers
* *
* @ticket: [out] ww_acquire_ctx returned by call. * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
* non-blocking reserves should be tried.
* @list: thread private list of ttm_validate_buffer structs. * @list: thread private list of ttm_validate_buffer structs.
* *
* Tries to reserve bos pointed to by the list entries for validation. * Tries to reserve bos pointed to by the list entries for validation.
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册