Commit edd4fc63 authored by Rob Clark

drm/msm: rework inactive-work

Re-arrange things a bit so that work requested after a bo fence passes, such as
a pageflip, is completed before retiring bo's.  Without any sort of bo cache in
userspace, some games can trigger hundreds of transient bo's, which can cause
retire to take a long time (5-10ms).  Obviously we want a bo cache eventually,
but this rework also makes things a bit easier for atomic and leaves the code
a bit cleaner.
Signed-off-by: Rob Clark <robdclark@gmail.com>
Acked-by: David Brown <davidb@codeaurora.org>
Parent a8623918
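For orientation, here is a minimal consumer sketch of the callback API this patch introduces, modeled on the mdp4_crtc conversion in the diff below. The my_flip_state, fence_passed(), my_flip_state_init() and my_queue_flip() names are hypothetical; only struct msm_fence_cb, INIT_FENCE_CB() and msm_gem_queue_inactive_cb() come from the patch itself:

#include "msm_drv.h"	/* struct msm_fence_cb, INIT_FENCE_CB(), msm_gem_queue_inactive_cb() */

/* hypothetical per-flip state embedding the new callback object */
struct my_flip_state {
	struct drm_gem_object *obj;	/* scanout bo we must wait on */
	struct msm_fence_cb cb;		/* added to msm_drv.h by this patch */
};

/* runs from priv->wq once the bo's fence has passed */
static void fence_passed(struct msm_fence_cb *cb)
{
	struct my_flip_state *state =
			container_of(cb, struct my_flip_state, cb);

	/* the bo is no longer busy: safe to update scanout, send the
	 * vblank event, etc.
	 */
	(void)state;
}

static void my_flip_state_init(struct my_flip_state *state,
		struct drm_gem_object *obj)
{
	state->obj = obj;
	/* one-time init, binds fence_passed() via __msm_fence_worker() */
	INIT_FENCE_CB(&state->cb, fence_passed);
}

static int my_queue_flip(struct my_flip_state *state)
{
	/* returns -EINVAL if the cb is still pending; if the bo is
	 * already inactive the cb is queued to the wq right away
	 */
	return msm_gem_queue_inactive_cb(state->obj, &state->cb);
}

The callback always runs from the driver workqueue (priv->wq) via __msm_fence_worker(): immediately if the bo is already inactive, otherwise from msm_update_fence() once the bo's fence has passed, which is what lets per-bo work such as a pageflip complete without waiting for the full retire pass.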
@@ -51,7 +51,7 @@ struct mdp4_crtc {
 
 	/* if there is a pending flip, these will be non-null: */
 	struct drm_pending_vblank_event *event;
-	struct work_struct pageflip_work;
+	struct msm_fence_cb pageflip_cb;
 
 	/* the fb that we currently hold a scanout ref to: */
 	struct drm_framebuffer *fb;
@@ -132,10 +132,10 @@ static void crtc_flush(struct drm_crtc *crtc)
 	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
 }
 
-static void pageflip_worker(struct work_struct *work)
+static void pageflip_cb(struct msm_fence_cb *cb)
 {
 	struct mdp4_crtc *mdp4_crtc =
-		container_of(work, struct mdp4_crtc, pageflip_work);
+		container_of(cb, struct mdp4_crtc, pageflip_cb);
 	struct drm_crtc *crtc = &mdp4_crtc->base;
 
 	mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb);
@@ -397,8 +397,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
 	mdp4_crtc->event = event;
 	update_fb(crtc, true, new_fb);
 
-	return msm_gem_queue_inactive_work(obj,
-			&mdp4_crtc->pageflip_work);
+	return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
 }
 
 static int mdp4_crtc_set_property(struct drm_crtc *crtc,
@@ -702,7 +701,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
 	ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
 			"unref cursor", unref_cursor_worker);
 
-	INIT_WORK(&mdp4_crtc->pageflip_work, pageflip_worker);
+	INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
 
 	drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
 	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
...
@@ -187,6 +187,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 	init_waitqueue_head(&priv->fence_event);
 
 	INIT_LIST_HEAD(&priv->inactive_list);
+	INIT_LIST_HEAD(&priv->fence_cbs);
 
 	drm_mode_config_init(dev);
 
@@ -539,15 +540,36 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
 	return ret;
 }
 
-/* call under struct_mutex */
+/* called from workqueue */
 void msm_update_fence(struct drm_device *dev, uint32_t fence)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 
-	if (fence > priv->completed_fence) {
-		priv->completed_fence = fence;
-		wake_up_all(&priv->fence_event);
+	mutex_lock(&dev->struct_mutex);
+	priv->completed_fence = max(fence, priv->completed_fence);
+
+	while (!list_empty(&priv->fence_cbs)) {
+		struct msm_fence_cb *cb;
+
+		cb = list_first_entry(&priv->fence_cbs,
+				struct msm_fence_cb, work.entry);
+
+		if (cb->fence > priv->completed_fence)
+			break;
+
+		list_del_init(&cb->work.entry);
+		queue_work(priv->wq, &cb->work);
 	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	wake_up_all(&priv->fence_event);
+}
+
+void __msm_fence_worker(struct work_struct *work)
+{
+	struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
+	cb->func(cb);
 }
 
 /*
...
@@ -73,6 +73,9 @@ struct msm_drm_private {
 
 	struct workqueue_struct *wq;
 
+	/* callbacks deferred until bo is inactive: */
+	struct list_head fence_cbs;
+
 	/* registered IOMMU domains: */
 	unsigned int num_iommus;
 	struct iommu_domain *iommus[NUM_DOMAINS];
@@ -97,6 +100,20 @@ struct msm_format {
 	uint32_t pixel_format;
 };
 
+/* callback from wq once fence has passed: */
+struct msm_fence_cb {
+	struct work_struct work;
+	uint32_t fence;
+	void (*func)(struct msm_fence_cb *cb);
+};
+
+void __msm_fence_worker(struct work_struct *work);
+
+#define INIT_FENCE_CB(_cb, _func)  do {                     \
+		INIT_WORK(&(_cb)->work, __msm_fence_worker); \
+		(_cb)->func = _func;                         \
+	} while (0)
+
 /* As there are different display controller blocks depending on the
  * snapdragon version, the kms support is split out and the appropriate
  * implementation is loaded at runtime.  The kms module is responsible
@@ -160,8 +177,8 @@ int msm_gem_prime_pin(struct drm_gem_object *obj);
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
 void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
 void *msm_gem_vaddr(struct drm_gem_object *obj);
-int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
-		struct work_struct *work);
+int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
+		struct msm_fence_cb *cb);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
 		struct msm_gpu *gpu, bool write, uint32_t fence);
 void msm_gem_move_to_inactive(struct drm_gem_object *obj);
...
@@ -309,7 +309,17 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 
 int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
 {
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
+
+	/* this is safe right now because we don't unmap until the
+	 * bo is deleted:
+	 */
+	if (msm_obj->domain[id].iova) {
+		*iova = msm_obj->domain[id].iova;
+		return 0;
+	}
+
 	mutex_lock(&obj->dev->struct_mutex);
 	ret = msm_gem_get_iova_locked(obj, id, iova);
 	mutex_unlock(&obj->dev->struct_mutex);
@@ -379,8 +389,11 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
 	return ret;
 }
 
-int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
-		struct work_struct *work)
+/* setup callback for when bo is no longer busy..
+ * TODO probably want to differentiate read vs write..
+ */
+int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
+		struct msm_fence_cb *cb)
 {
 	struct drm_device *dev = obj->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -388,12 +401,13 @@ int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
-	if (!list_empty(&work->entry)) {
+	if (!list_empty(&cb->work.entry)) {
 		ret = -EINVAL;
 	} else if (is_active(msm_obj)) {
-		list_add_tail(&work->entry, &msm_obj->inactive_work);
+		cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
+		list_add_tail(&cb->work.entry, &priv->fence_cbs);
 	} else {
-		queue_work(priv->wq, work);
+		queue_work(priv->wq, &cb->work);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
@@ -426,16 +440,6 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
 	msm_obj->write_fence = 0;
 	list_del_init(&msm_obj->mm_list);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-
-	while (!list_empty(&msm_obj->inactive_work)) {
-		struct work_struct *work;
-
-		work = list_first_entry(&msm_obj->inactive_work,
-				struct work_struct, entry);
-
-		list_del_init(&work->entry);
-		queue_work(priv->wq, work);
-	}
 }
 
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
@@ -604,7 +608,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	reservation_object_init(msm_obj->resv);
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
-	INIT_LIST_HEAD(&msm_obj->inactive_work);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 
 	*obj = &msm_obj->base;
...
@@ -45,9 +45,6 @@ struct msm_gem_object {
 	 */
 	struct list_head submit_entry;
 
-	/* work defered until bo is inactive: */
-	struct list_head inactive_work;
-
 	struct page **pages;
 	struct sg_table *sgt;
 	void *vaddr;
...
@@ -268,6 +268,8 @@ static void retire_worker(struct work_struct *work)
 	struct drm_device *dev = gpu->dev;
 	uint32_t fence = gpu->funcs->last_fence(gpu);
 
+	msm_update_fence(gpu->dev, fence);
+
 	mutex_lock(&dev->struct_mutex);
 
 	while (!list_empty(&gpu->active_list)) {
@@ -287,8 +289,6 @@ static void retire_worker(struct work_struct *work)
 		}
 	}
 
-	msm_update_fence(gpu->dev, fence);
-
 	mutex_unlock(&dev->struct_mutex);
 }
...