Commit 36ff39c4 authored by Christian König

drm/radeon: replace cs_mutex with vm_mutex v3

Try to remove or replace the cs_mutex with a
vm_mutex where it is still needed.

v2: fix locking order
v3: rebased on drm-next
Signed-off-by: Christian König <deathsimple@vodafone.de>
Parent 736fc37f
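For context (not part of the patch): after this change every path that touches a VM takes the global vm_manager lock before the per-VM mutex and releases them in the reverse order, which is the ordering the "v2: fix locking order" note refers to. Below is a minimal userspace sketch of that lock nesting using pthreads; the names `vm_manager_lock`, `struct vm` and `bind_vm` are illustrative stand-ins for the radeon structures, not kernel code.

```c
/* Illustrative sketch only. Build: cc -pthread lock_order.c -o lock_order */
#include <pthread.h>
#include <stdio.h>

/* Global lock, standing in for rdev->vm_manager.lock. */
static pthread_mutex_t vm_manager_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for struct radeon_vm: a per-VM lock plus some state. */
struct vm {
	pthread_mutex_t mutex;
	int bound;
};

/*
 * Mirrors the patched CS path: take the global lock first, then the
 * per-VM lock, and release in reverse order.  Taking them in a
 * different order on another path is what would risk a deadlock.
 */
static void bind_vm(struct vm *vm)
{
	pthread_mutex_lock(&vm_manager_lock);
	pthread_mutex_lock(&vm->mutex);
	vm->bound = 1;
	pthread_mutex_unlock(&vm->mutex);
	pthread_mutex_unlock(&vm_manager_lock);
}

int main(void)
{
	struct vm vm = { .bound = 0 };

	pthread_mutex_init(&vm.mutex, NULL);
	bind_vm(&vm);
	printf("vm bound: %d\n", vm.bound);
	pthread_mutex_destroy(&vm.mutex);
	return 0;
}
```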
@@ -159,48 +159,6 @@ static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
 #endif
 bool radeon_get_bios(struct radeon_device *rdev);
 
-/*
- * Mutex which allows recursive locking from the same process.
- */
-struct radeon_mutex {
-        struct mutex mutex;
-        struct task_struct *owner;
-        int level;
-};
-
-static inline void radeon_mutex_init(struct radeon_mutex *mutex)
-{
-        mutex_init(&mutex->mutex);
-        mutex->owner = NULL;
-        mutex->level = 0;
-}
-
-static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
-{
-        if (mutex_trylock(&mutex->mutex)) {
-                /* The mutex was unlocked before, so it's ours now */
-                mutex->owner = current;
-        } else if (mutex->owner != current) {
-                /* Another process locked the mutex, take it */
-                mutex_lock(&mutex->mutex);
-                mutex->owner = current;
-        }
-        /* Otherwise the mutex was already locked by this process */
-        mutex->level++;
-}
-
-static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
-{
-        if (--mutex->level > 0)
-                return;
-        mutex->owner = NULL;
-        mutex_unlock(&mutex->mutex);
-}
-
 /*
  * Dummy page
  */
@@ -712,6 +670,7 @@ struct radeon_vm_funcs {
 };
 
 struct radeon_vm_manager {
+        struct mutex lock;
         struct list_head lru_vm;
         uint32_t use_bitmap;
         struct radeon_sa_manager sa_manager;
@@ -1532,7 +1491,6 @@ struct radeon_device {
         struct radeon_gem gem;
         struct radeon_pm pm;
         uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
-        struct radeon_mutex cs_mutex;
         struct radeon_wb wb;
         struct radeon_dummy_page dummy_page;
         bool shutdown;
......
@@ -440,6 +440,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
                 return r;
         }
 
+        mutex_lock(&rdev->vm_manager.lock);
         mutex_lock(&vm->mutex);
         r = radeon_vm_bind(rdev, vm);
         if (r) {
@@ -477,7 +478,8 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
                 }
                 vm->fence = radeon_fence_ref(parser->ib.fence);
         }
-        mutex_unlock(&fpriv->vm.mutex);
+        mutex_unlock(&vm->mutex);
+        mutex_unlock(&rdev->vm_manager.lock);
         return r;
 }
 
@@ -497,9 +499,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         struct radeon_cs_parser parser;
         int r;
 
-        radeon_mutex_lock(&rdev->cs_mutex);
         if (!rdev->accel_working) {
-                radeon_mutex_unlock(&rdev->cs_mutex);
                 return -EBUSY;
         }
         /* initialize parser */
@@ -513,7 +513,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                 DRM_ERROR("Failed to initialize parser !\n");
                 radeon_cs_parser_fini(&parser, r);
                 r = radeon_cs_handle_lockup(rdev, r);
-                radeon_mutex_unlock(&rdev->cs_mutex);
                 return r;
         }
         r = radeon_cs_parser_relocs(&parser);
@@ -522,7 +521,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                         DRM_ERROR("Failed to parse relocation %d!\n", r);
                 radeon_cs_parser_fini(&parser, r);
                 r = radeon_cs_handle_lockup(rdev, r);
-                radeon_mutex_unlock(&rdev->cs_mutex);
                 return r;
         }
         r = radeon_cs_ib_chunk(rdev, &parser);
@@ -536,7 +534,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 out:
         radeon_cs_parser_fini(&parser, r);
         r = radeon_cs_handle_lockup(rdev, r);
-        radeon_mutex_unlock(&rdev->cs_mutex);
         return r;
 }
......
@@ -728,7 +728,6 @@ int radeon_device_init(struct radeon_device *rdev,
 
         /* mutex initialization are all done here so we
          * can recall function without having locking issues */
-        radeon_mutex_init(&rdev->cs_mutex);
         mutex_init(&rdev->ring_lock);
         mutex_init(&rdev->dc_hw_i2c_mutex);
         atomic_set(&rdev->ih.lock, 0);
@@ -741,6 +740,7 @@ int radeon_device_init(struct radeon_device *rdev,
         if (r)
                 return r;
         /* initialize vm here */
+        mutex_init(&rdev->vm_manager.lock);
         rdev->vm_manager.use_bitmap = 1;
         rdev->vm_manager.max_pfn = 1 << 20;
         INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
......
@@ -305,7 +305,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
         return r;
 }
 
-/* cs mutex must be lock */
+/* global mutex must be lock */
 static void radeon_vm_unbind_locked(struct radeon_device *rdev,
                 struct radeon_vm *vm)
 {
@@ -356,17 +356,17 @@ int radeon_vm_manager_suspend(struct radeon_device *rdev)
 {
         struct radeon_vm *vm, *tmp;
 
-        radeon_mutex_lock(&rdev->cs_mutex);
+        mutex_lock(&rdev->vm_manager.lock);
         /* unbind all active vm */
         list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
                 radeon_vm_unbind_locked(rdev, vm);
         }
         rdev->vm_manager.funcs->fini(rdev);
-        radeon_mutex_unlock(&rdev->cs_mutex);
+        mutex_unlock(&rdev->vm_manager.lock);
         return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
 }
 
-/* cs mutex must be lock */
+/* global mutex must be locked */
 void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
 {
         mutex_lock(&vm->mutex);
@@ -374,7 +374,7 @@ void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
         mutex_unlock(&vm->mutex);
 }
 
-/* cs mutex must be lock & vm mutex must be lock */
+/* global and local mutex must be locked */
 int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
 {
         struct radeon_vm *vm_evict;
@@ -478,7 +478,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
         if (last_pfn > vm->last_pfn) {
                 /* release mutex and lock in right order */
                 mutex_unlock(&vm->mutex);
-                radeon_mutex_lock(&rdev->cs_mutex);
+                mutex_lock(&rdev->vm_manager.lock);
                 mutex_lock(&vm->mutex);
                 /* and check again */
                 if (last_pfn > vm->last_pfn) {
@@ -487,7 +487,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
                         radeon_vm_unbind_locked(rdev, vm);
                         vm->last_pfn = (last_pfn + align) & ~align;
                 }
-                radeon_mutex_unlock(&rdev->cs_mutex);
+                mutex_unlock(&rdev->vm_manager.lock);
         }
         head = &vm->va;
         last_offset = 0;
@@ -542,7 +542,7 @@ static u64 radeon_vm_get_addr(struct radeon_device *rdev,
         return addr;
 }
 
-/* object have to be reserved & cs mutex took & vm mutex took */
+/* object have to be reserved & global and local mutex must be locked */
 int radeon_vm_bo_update_pte(struct radeon_device *rdev,
                 struct radeon_vm *vm,
                 struct radeon_bo *bo,
@@ -601,10 +601,10 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
         if (bo_va == NULL)
                 return 0;
 
-        radeon_mutex_lock(&rdev->cs_mutex);
+        mutex_lock(&rdev->vm_manager.lock);
         mutex_lock(&vm->mutex);
         radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
-        radeon_mutex_unlock(&rdev->cs_mutex);
+        mutex_unlock(&rdev->vm_manager.lock);
         list_del(&bo_va->vm_list);
         mutex_unlock(&vm->mutex);
         list_del(&bo_va->bo_list);
@@ -647,10 +647,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
         struct radeon_bo_va *bo_va, *tmp;
         int r;
 
-        radeon_mutex_lock(&rdev->cs_mutex);
+        mutex_lock(&rdev->vm_manager.lock);
         mutex_lock(&vm->mutex);
         radeon_vm_unbind_locked(rdev, vm);
-        radeon_mutex_unlock(&rdev->cs_mutex);
+        mutex_unlock(&rdev->vm_manager.lock);
         /* remove all bo */
         r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
......
@@ -159,11 +159,9 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
 static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
 {
         if (r == -EDEADLK) {
-                radeon_mutex_lock(&rdev->cs_mutex);
                 r = radeon_gpu_reset(rdev);
                 if (!r)
                         r = -EAGAIN;
-                radeon_mutex_unlock(&rdev->cs_mutex);
         }
         return r;
 }
......