Commit 8a206685 authored by Christian König

drm/amdgpu: use drm_exec for GEM and CSA handling v2

Start using the new component here as well.

v2: ignore duplicates to allow per VM BO mappings
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230711133122.3710-5-christian.koenig@amd.com
Parent 8abc1eb2
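
For orientation before the diff: drm_exec wraps a ww_mutex acquire context and replaces the ttm_eu_* buffer-list helpers used below. The core pattern is a retry loop that, on contention, drops every lock already taken and starts over. A minimal sketch using the two-argument drm_exec_init() of this series; the helper name and the two objects are made up for illustration, not code from this patch:

#include <drm/drm_exec.h>

/* Illustrative only: lock two GEM objects deadlock-free. */
static int lock_two_objs(struct drm_gem_object *a, struct drm_gem_object *b)
{
	struct drm_exec exec;
	int r;

	/* INTERRUPTIBLE_WAIT: sleep interruptibly while waiting for locks. */
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec, a);
		/* On ww_mutex contention: unlock everything, restart loop. */
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out;

		r = drm_exec_lock_obj(&exec, b);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out;
	}

	/* Both objects are locked here; do the actual work. */
out:
	drm_exec_fini(&exec);	/* drops all locks still held */
	return r;
}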
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -22,6 +22,8 @@
  *
  * Author: Monk.liu@amd.com
  */
+#include <drm/drm_exec.h>
+
 #include "amdgpu.h"
 
 uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
@@ -65,31 +67,25 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			  struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
 			  uint64_t csa_addr, uint32_t size)
 {
-	struct ww_acquire_ctx ticket;
-	struct list_head list;
-	struct amdgpu_bo_list_entry pd;
-	struct ttm_validate_buffer csa_tv;
+	struct drm_exec exec;
 	int r;
 
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&csa_tv.head);
-	csa_tv.bo = &bo->tbo;
-	csa_tv.num_shared = 1;
-
-	list_add(&csa_tv.head, &list);
-	amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-	if (r) {
-		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
-		return r;
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_until_all_locked(&exec) {
+		r = amdgpu_vm_lock_pd(vm, &exec, 0);
+		if (likely(!r))
+			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r)) {
+			DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+			goto error;
+		}
 	}
 
 	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 	if (!*bo_va) {
-		ttm_eu_backoff_reservation(&ticket, &list);
 		DRM_ERROR("failed to create bo_va for static CSA\n");
-		return -ENOMEM;
+		r = -ENOMEM;
+		goto error;
 	}
 
 	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
@@ -99,48 +95,42 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r) {
 		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
 		amdgpu_vm_bo_del(adev, *bo_va);
-		ttm_eu_backoff_reservation(&ticket, &list);
-		return r;
+		goto error;
 	}
 
-	ttm_eu_backoff_reservation(&ticket, &list);
-	return 0;
+error:
+	drm_exec_fini(&exec);
+	return r;
 }
 
 int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			    struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
 			    uint64_t csa_addr)
 {
-	struct ww_acquire_ctx ticket;
-	struct list_head list;
-	struct amdgpu_bo_list_entry pd;
-	struct ttm_validate_buffer csa_tv;
+	struct drm_exec exec;
 	int r;
 
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&csa_tv.head);
-	csa_tv.bo = &bo->tbo;
-	csa_tv.num_shared = 1;
-
-	list_add(&csa_tv.head, &list);
-	amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
-	if (r) {
-		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
-		return r;
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_until_all_locked(&exec) {
+		r = amdgpu_vm_lock_pd(vm, &exec, 0);
+		if (likely(!r))
+			r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r)) {
+			DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+			goto error;
+		}
 	}
 
 	r = amdgpu_vm_bo_unmap(adev, bo_va, csa_addr);
 	if (r) {
 		DRM_ERROR("failed to do bo_unmap on static CSA, err=%d\n", r);
-		ttm_eu_backoff_reservation(&ticket, &list);
-		return r;
+		goto error;
 	}
 
 	amdgpu_vm_bo_del(adev, bo_va);
-	ttm_eu_backoff_reservation(&ticket, &list);
 
-	return 0;
+error:
+	drm_exec_fini(&exec);
+	return r;
 }
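
A note on the error handling above: in both CSA helpers the success path now falls through into the error label with r == 0, so drm_exec_fini(), which drops every lock the loop acquired, runs exactly once on every exit path. This replaces the ttm_eu_backoff_reservation() call that previously had to precede each return.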
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -33,6 +33,7 @@
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
 #include <drm/drm_gem_ttm_helper.h>
 #include <drm/ttm/ttm_tt.h>
@@ -198,29 +199,24 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 
-	struct amdgpu_bo_list_entry vm_pd;
-	struct list_head list, duplicates;
 	struct dma_fence *fence = NULL;
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
 	struct amdgpu_bo_va *bo_va;
+	struct drm_exec exec;
 	long r;
 
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
-
-	tv.bo = &bo->tbo;
-	tv.num_shared = 2;
-	list_add(&tv.head, &list);
-
-	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
-
-	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
-	if (r) {
-		dev_err(adev->dev, "leaking bo va because "
-			"we fail to reserve bo (%ld)\n", r);
-		return;
+	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+	drm_exec_until_all_locked(&exec) {
+		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto out_unlock;
+
+		r = amdgpu_vm_lock_pd(vm, &exec, 0);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto out_unlock;
 	}
+
 	bo_va = amdgpu_vm_bo_find(vm, bo);
 	if (!bo_va || --bo_va->ref_count)
 		goto out_unlock;
@@ -230,6 +226,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 		goto out_unlock;
 
 	r = amdgpu_vm_clear_freed(adev, vm, &fence);
+	if (unlikely(r < 0))
+		dev_err(adev->dev, "failed to clear page "
+			"tables on GEM object close (%ld)\n", r);
 	if (r || !fence)
 		goto out_unlock;
 
@@ -237,10 +236,9 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	dma_fence_put(fence);
 
 out_unlock:
-	if (unlikely(r < 0))
-		dev_err(adev->dev, "failed to clear page "
-			"tables on GEM object close (%ld)\n", r);
-	ttm_eu_backoff_reservation(&ticket, &list);
+	if (r)
+		dev_err(adev->dev, "leaking bo va (%ld)\n", r);
+	drm_exec_fini(&exec);
 }
 
 static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
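
A reference note on drm_exec_prepare_obj() used in amdgpu_gem_object_close() above: as far as I can tell it is drm_exec_lock_obj() plus a fence-slot reservation, roughly the following shape (the real helper lives in drivers/gpu/drm/drm_exec.c):

/* Approximate shape of drm_exec_prepare_obj(); not copied verbatim. */
int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
			 unsigned int num_fences)
{
	int ret = drm_exec_lock_obj(exec, obj);

	if (ret)
		return ret;

	/* Make room for num_fences later dma_resv_add_fence() calls. */
	ret = dma_resv_reserve_fences(obj->resv, num_fences);
	if (ret)
		drm_exec_unlock_obj(exec, obj);
	return ret;
}

The close path passes 1 because amdgpu_bo_fence() later adds the page-table clearing fence to the BO's reservation object. amdgpu_vm_lock_pd() wraps the same helper for the VM's root page-directory BO; the count a caller passes (0 here, 2 in the ioctl below) appears to come on top of the slots the VM code reserves for its own updates.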
@@ -675,10 +673,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo *abo;
 	struct amdgpu_bo_va *bo_va;
-	struct amdgpu_bo_list_entry vm_pd;
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
-	struct list_head list, duplicates;
+	struct drm_exec exec;
 	uint64_t va_flags;
 	uint64_t vm_size;
 	int r = 0;
@@ -728,36 +723,38 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
 	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
 	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
 		gobj = drm_gem_object_lookup(filp, args->handle);
 		if (gobj == NULL)
 			return -ENOENT;
 		abo = gem_to_amdgpu_bo(gobj);
-		tv.bo = &abo->tbo;
-		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
-			tv.num_shared = 1;
-		else
-			tv.num_shared = 0;
-		list_add(&tv.head, &list);
 	} else {
 		gobj = NULL;
 		abo = NULL;
 	}
 
-	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+		      DRM_EXEC_IGNORE_DUPLICATES);
+	drm_exec_until_all_locked(&exec) {
+		if (gobj) {
+			r = drm_exec_lock_obj(&exec, gobj);
+			drm_exec_retry_on_contention(&exec);
+			if (unlikely(r))
+				goto error;
+		}
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
-	if (r)
-		goto error_unref;
+		r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
+		drm_exec_retry_on_contention(&exec);
+		if (unlikely(r))
+			goto error;
+	}
 
 	if (abo) {
 		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
 		if (!bo_va) {
 			r = -ENOENT;
-			goto error_backoff;
+			goto error;
 		}
 	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
 		bo_va = fpriv->prt_va;
@@ -794,10 +791,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
 					args->operation);
 
-error_backoff:
-	ttm_eu_backoff_reservation(&ticket, &list);
-
-error_unref:
+error:
+	drm_exec_fini(&exec);
 	drm_gem_object_put(gobj);
 	return r;
 }
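
On the v2 note from the commit message ("ignore duplicates to allow per VM BO mappings"): a per-VM BO shares its dma_resv, and therefore its ww_mutex, with the VM's root page directory, so locking the GEM object and then the PD can hit the same lock twice. Without DRM_EXEC_IGNORE_DUPLICATES, drm_exec would fail that second attempt with -EALREADY; with the flag, the duplicate is simply skipped, so the VA ioctl and object close keep working on per-VM BOs.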