Commit ef78f7b1 authored by Chris Wilson

drm/i915: Use drm_gem_object.resv

Since commit 1ba62714 ("drm: Add reservation_object to
drm_gem_object"), struct drm_gem_object has carried its own builtin
reservation_object, rendering our private one redundant bloat. Remove
our reservation_object and point at obj->base.resv instead.

References: 1ba62714 ("drm: Add reservation_object to drm_gem_object")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190618125858.7295-1-chris@chris-wilson.co.uk
Parent 7009db14
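
To illustrate the resulting pattern before the diff itself: below is a minimal sketch of a driver-private GEM object relying on the builtin reservation object. The struct my_gem_object and my_gem_object_lock()/my_gem_object_unlock() names are hypothetical, not i915 code, and the sketch assumes the 2019-era <linux/reservation.h> API (since renamed to dma_resv).

#include <drm/drm_gem.h>
#include <linux/reservation.h>

struct my_gem_object {
	struct drm_gem_object base;
	/*
	 * No private reservation state is needed any more: since commit
	 * 1ba62714, drm_gem_object_init() points base.resv at the
	 * reservation_object embedded in struct drm_gem_object, so a
	 * driver-side "struct reservation_object *resv" pointer and its
	 * "__builtin_resv" backing storage are redundant bloat.
	 */
};

/* Lock/unlock the object through the drm_gem_object member. */
static void my_gem_object_lock(struct my_gem_object *obj)
{
	reservation_object_lock(obj->base.resv, NULL);
}

static void my_gem_object_unlock(struct my_gem_object *obj)
{
	reservation_object_unlock(obj->base.resv);
}

This mirrors the i915_gem_object_lock()/i915_gem_object_unlock() hunks below; only the dma-buf import path still assigns base.resv explicitly, in order to share the exporter's reservation object.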
@@ -14256,7 +14256,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	 */
 	if (needs_modeset(crtc_state)) {
 		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
-						      old_obj->resv, NULL,
+						      old_obj->base.resv, NULL,
 						      false, 0,
 						      GFP_KERNEL);
 		if (ret < 0)
@@ -14300,13 +14300,13 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 		struct dma_fence *fence;

 		ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
-						      obj->resv, NULL,
+						      obj->base.resv, NULL,
 						      false, I915_FENCE_TIMEOUT,
 						      GFP_KERNEL);
 		if (ret < 0)
 			return ret;

-		fence = reservation_object_get_excl_rcu(obj->resv);
+		fence = reservation_object_get_excl_rcu(obj->base.resv);
 		if (fence) {
 			add_rps_boost_after_vblank(new_state->crtc, fence);
 			dma_fence_put(fence);
...
@@ -110,13 +110,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 *
 	 */
 retry:
-	seq = raw_read_seqcount(&obj->resv->seq);
+	seq = raw_read_seqcount(&obj->base.resv->seq);

 	/* Translate the exclusive fence to the READ *and* WRITE engine */
-	args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
+	args->busy =
+		busy_check_writer(rcu_dereference(obj->base.resv->fence_excl));

 	/* Translate shared fences to READ set of engines */
-	list = rcu_dereference(obj->resv->fence);
+	list = rcu_dereference(obj->base.resv->fence);
 	if (list) {
 		unsigned int shared_count = list->shared_count, i;
@@ -128,7 +129,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		}
 	}

-	if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
+	if (args->busy && read_seqcount_retry(&obj->base.resv->seq, seq))
 		goto retry;

 	err = 0;
...
@@ -143,11 +143,12 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 		dma_fence_get(&clflush->dma);

 		i915_sw_fence_await_reservation(&clflush->wait,
-						obj->resv, NULL,
+						obj->base.resv, NULL,
 						true, I915_FENCE_TIMEOUT,
 						I915_FENCE_GFP);

-		reservation_object_add_excl_fence(obj->resv, &clflush->dma);
+		reservation_object_add_excl_fence(obj->base.resv,
+						  &clflush->dma);

 		i915_sw_fence_commit(&clflush->wait);
 	} else if (obj->mm.pages) {
...
@@ -282,13 +282,13 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
 	i915_gem_object_lock(obj);
 	err = i915_sw_fence_await_reservation(&work->wait,
-					      obj->resv, NULL,
+					      obj->base.resv, NULL,
 					      true, I915_FENCE_TIMEOUT,
 					      I915_FENCE_GFP);
 	if (err < 0) {
 		dma_fence_set_error(&work->dma, err);
 	} else {
-		reservation_object_add_excl_fence(obj->resv, &work->dma);
+		reservation_object_add_excl_fence(obj->base.resv, &work->dma);
 		err = 0;
 	}
 	i915_gem_object_unlock(obj);
...
@@ -214,7 +214,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 	exp_info.size = gem_obj->size;
 	exp_info.flags = flags;
 	exp_info.priv = gem_obj;
-	exp_info.resv = obj->resv;
+	exp_info.resv = obj->base.resv;

 	if (obj->ops->dmabuf_export) {
 		int ret = obj->ops->dmabuf_export(obj);
@@ -290,7 +290,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
 	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
 	obj->base.import_attach = attach;
-	obj->resv = dma_buf->resv;
+	obj->base.resv = dma_buf->resv;

 	/* We use GTT as shorthand for a coherent domain, one that is
 	 * neither in the GPU cache nor in the CPU cache, where all
...
@@ -73,12 +73,12 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
 		       0);

 	if (i915_sw_fence_await_reservation(&stub->chain,
-					    obj->resv, NULL,
+					    obj->base.resv, NULL,
 					    true, I915_FENCE_TIMEOUT,
 					    I915_FENCE_GFP) < 0)
 		goto err;

-	reservation_object_add_excl_fence(obj->resv, &stub->dma);
+	reservation_object_add_excl_fence(obj->base.resv, &stub->dma);

 	return &stub->dma;
...
@@ -70,9 +70,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,

 	obj->ops = ops;

-	reservation_object_init(&obj->__builtin_resv);
-	obj->resv = &obj->__builtin_resv;
-
 	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
 	i915_active_request_init(&obj->frontbuffer_write,
 				 NULL, frontbuffer_retire);
@@ -233,7 +230,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		if (obj->base.import_attach)
 			drm_prime_gem_destroy(&obj->base, NULL);

-		reservation_object_fini(&obj->__builtin_resv);
 		drm_gem_object_release(&obj->base);

 		bitmap_free(obj->bit_17);
...
@@ -99,22 +99,22 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
 	__drm_gem_object_put(&obj->base);
 }

-#define assert_object_held(obj) reservation_object_assert_held((obj)->resv)
+#define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv)

 static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
 {
-	reservation_object_lock(obj->resv, NULL);
+	reservation_object_lock(obj->base.resv, NULL);
 }

 static inline int
 i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
 {
-	return reservation_object_lock_interruptible(obj->resv, NULL);
+	return reservation_object_lock_interruptible(obj->base.resv, NULL);
 }

 static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
 {
-	reservation_object_unlock(obj->resv);
+	reservation_object_unlock(obj->base.resv);
 }

 struct dma_fence *
@@ -373,7 +373,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
 	struct dma_fence *fence;

 	rcu_read_lock();
-	fence = reservation_object_get_excl_rcu(obj->resv);
+	fence = reservation_object_get_excl_rcu(obj->base.resv);
 	rcu_read_unlock();

 	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
...
@@ -7,8 +7,6 @@
 #ifndef __I915_GEM_OBJECT_TYPES_H__
 #define __I915_GEM_OBJECT_TYPES_H__

-#include <linux/reservation.h>
-
 #include <drm/drm_gem.h>

 #include "i915_active.h"
@@ -228,18 +226,6 @@ struct drm_i915_gem_object {
 		bool quirked:1;
 	} mm;

-	/** Breadcrumb of last rendering to the buffer.
-	 * There can only be one writer, but we allow for multiple readers.
-	 * If there is a writer that necessarily implies that all other
-	 * read requests are complete - but we may only be lazily clearing
-	 * the read requests. A read request is naturally the most recent
-	 * request on a ring, so we may have two different write and read
-	 * requests on one ring where the write request is older than the
-	 * read request. This allows for the CPU to read from an active
-	 * buffer by only waiting for the write to complete.
-	 */
-	struct reservation_object *resv;
-
 	/** References from framebuffers, locks out tiling changes. */
 	unsigned int framebuffer_references;
@@ -262,8 +248,6 @@ struct drm_i915_gem_object {
 	/** for phys allocated objects */
 	struct drm_dma_handle *phys_handle;
-
-	struct reservation_object __builtin_resv;
 };

 static inline struct drm_i915_gem_object *
...
@@ -144,7 +144,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 		unsigned int count, i;
 		int ret;

-		ret = reservation_object_get_fences_rcu(obj->resv,
+		ret = reservation_object_get_fences_rcu(obj->base.resv,
 							&excl, &count, &shared);
 		if (ret)
 			return ret;
@@ -156,7 +156,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,

 		kfree(shared);
 	} else {
-		excl = reservation_object_get_excl_rcu(obj->resv);
+		excl = reservation_object_get_excl_rcu(obj->base.resv);
 	}

 	if (excl) {
@@ -180,7 +180,8 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
 	might_sleep();
 	GEM_BUG_ON(timeout < 0);

-	timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
+	timeout = i915_gem_object_wait_reservation(obj->base.resv,
+						   flags, timeout);

 	return timeout < 0 ? timeout : 0;
 }
...
@@ -96,7 +96,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	list_for_each_entry(obj, list, batch_pool_link) {
 		/* The batches are strictly LRU ordered */
 		if (i915_gem_object_is_active(obj)) {
-			struct reservation_object *resv = obj->resv;
+			struct reservation_object *resv = obj->base.resv;

 			if (!reservation_object_test_signaled_rcu(resv, true))
 				break;
@@ -119,7 +119,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 		}
 	}

-	GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
+	GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv,
 							 true));

 	if (obj->base.size >= size)
...
@@ -1027,7 +1027,7 @@ i915_request_await_object(struct i915_request *to,
 		struct dma_fence **shared;
 		unsigned int count, i;

-		ret = reservation_object_get_fences_rcu(obj->resv,
+		ret = reservation_object_get_fences_rcu(obj->base.resv,
 							&excl, &count, &shared);
 		if (ret)
 			return ret;
@@ -1044,7 +1044,7 @@ i915_request_await_object(struct i915_request *to,
 			dma_fence_put(shared[i]);
 		kfree(shared);
 	} else {
-		excl = reservation_object_get_excl_rcu(obj->resv);
+		excl = reservation_object_get_excl_rcu(obj->base.resv);
 	}

 	if (excl) {
...
@@ -99,10 +99,10 @@ static void __i915_vma_retire(struct i915_active *ref)
 		return;

 	/* Prune the shared fence arrays iff completely idle (inc. external) */
-	if (reservation_object_trylock(obj->resv)) {
-		if (reservation_object_test_signaled_rcu(obj->resv, true))
-			reservation_object_add_excl_fence(obj->resv, NULL);
-		reservation_object_unlock(obj->resv);
+	if (reservation_object_trylock(obj->base.resv)) {
+		if (reservation_object_test_signaled_rcu(obj->base.resv, true))
+			reservation_object_add_excl_fence(obj->base.resv, NULL);
+		reservation_object_unlock(obj->base.resv);
 	}

 	/*
@@ -134,7 +134,7 @@ vma_create(struct drm_i915_gem_object *obj,
 	vma->vm = vm;
 	vma->ops = &vm->vma_ops;
 	vma->obj = obj;
-	vma->resv = obj->resv;
+	vma->resv = obj->base.resv;
 	vma->size = obj->base.size;
 	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
...