Commit f2d476a1 authored by Maarten Lankhorst

drm/ttm: use ttm_bo_reserve_slowpath_nolru in ttm_eu_reserve_buffers, v2

This requires re-using the seqno. Instead of spinning with a new seqno
on every retry we keep the current one, but still drop all other
reservations we hold. Only when the slowpath reserve succeeds do we try
to take our other reservations again.

This should increase fairness slightly.

Changes since v1:
 - Increase val_seq before calling ttm_bo_reserve_slowpath_nolru and
   retrying to take all entries to prevent a race.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Parent 5e45d7df
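For orientation, the reservation loop after this patch, consolidated from the two hunks below, looks roughly as follows. This is a simplified sketch, not the verbatim kernel code: the success, -EBUSY and error-label handling and the surrounding function are elided, and only helpers that appear in the diff are used.

retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* a buffer already taken via the slowpath stays reserved */
		if (entry->reserved)
			continue;

		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
		if (ret == -EAGAIN) {
			/* contention: drop every reservation we hold */
			ttm_eu_backoff_reservation_locked(list);

			/*
			 * take a fresh seqno for this retry so nobody can
			 * mistake our old, just-dropped reservation for a
			 * current one (the race closed in v2)
			 */
			val_seq = entry->bo->bdev->val_seq++;

			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);

			/* sleep until the contended buffer can be reserved */
			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
			if (unlikely(ret != 0))
				return ret;

			spin_lock(&glob->lru_lock);
			entry->reserved = true;	/* skip it on the next pass */

			/*
			 * re-take the reservations we dropped, reusing the
			 * seqno of the slowpath reservation instead of
			 * spinning with a new one on every pass
			 */
			goto retry;
		}
		/* 0, -EBUSY and other return codes: see the diff below */
	}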
@@ -129,13 +129,17 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
-retry:
 	spin_lock(&glob->lru_lock);
 	val_seq = entry->bo->bdev->val_seq++;
 
+retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
+		/* already slowpath reserved? */
+		if (entry->reserved)
+			continue;
+
 		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
@@ -155,11 +159,26 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 			/* fallthrough */
 		case -EAGAIN:
 			ttm_eu_backoff_reservation_locked(list);
+
+			/*
+			 * temporarily increase sequence number every retry,
+			 * to prevent us from seeing our old reservation
+			 * sequence when someone else reserved the buffer,
+			 * but hasn't updated the seq_valid/seqno members yet.
+			 */
+			val_seq = entry->bo->bdev->val_seq++;
+
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_wait_unreserved(bo, true);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
 			if (unlikely(ret != 0))
 				return ret;
+			spin_lock(&glob->lru_lock);
+			entry->reserved = true;
+			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+				ret = -EBUSY;
+				goto err;
+			}
 			goto retry;
 		default:
 			goto err;