Commit 59276f05 authored by Nirmoy Das, committed by Alex Deucher

drm/amdgpu: switch to amdgpu_bo_vm for vm code

The subclass amdgpu_bo_vm is intended for PT/PD BOs, which are also
shadowed, so switch PT/PD BOs over to amdgpu_bo_vm.

v4: update amdgpu_vm_update_funcs to accept amdgpu_bo_vm.
v3: simplify code.
    also check whether the shadow BO exists instead of checking only the BO type.
v2: squash three related patches.
Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent 1fdc79f6
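For context, the diff below leans on the amdgpu_bo_vm subclass and the helpers introduced by the earlier patches in this series. A minimal sketch of those definitions (paraphrased from amdgpu_object.h of this era, not part of this commit; details may differ):

struct amdgpu_bo_vm {
        struct amdgpu_bo        bo;      /* base BO; must stay the first member */
        struct amdgpu_bo        *shadow; /* GTT copy used for VRAM-loss recovery */
};

/* Downcast is container_of() over the embedded base BO. */
#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)

/* Only kernel-type BOs (PDs/PTs) are amdgpu_bo_vm and may carry a shadow. */
static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo)
{
        if (bo->tbo.type == ttm_bo_type_kernel)
                return to_amdgpu_bo_vm(bo)->shadow;

        return NULL;
}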
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -652,15 +652,15 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 	spin_lock(&adev->mman.bdev.lru_lock);
 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
 		struct amdgpu_bo *bo = bo_base->bo;
+		struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
 
 		if (!bo->parent)
 			continue;
 
 		ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
 					&vm->lru_bulk_move);
-		if (bo->shadow)
-			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
-						&bo->shadow->tbo.mem,
+		if (shadow)
+			ttm_bo_move_to_lru_tail(&shadow->tbo, &shadow->tbo.mem,
 						&vm->lru_bulk_move);
 	}
 	spin_unlock(&adev->mman.bdev.lru_lock);
@@ -692,12 +692,13 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
 		struct amdgpu_bo *bo = bo_base->bo;
+		struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
 
 		r = validate(param, bo);
 		if (r)
 			return r;
 
-		if (bo->shadow) {
-			r = validate(param, bo->shadow);
+		if (shadow) {
+			r = validate(param, shadow);
 			if (r)
 				return r;
 		}
@@ -705,7 +706,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		if (bo->tbo.type != ttm_bo_type_kernel) {
 			amdgpu_vm_bo_moved(bo_base);
 		} else {
-			vm->update_funcs->map_table(bo);
+			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
 			amdgpu_vm_bo_relocated(bo_base);
 		}
 	}
@@ -737,7 +738,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  *
  * @adev: amdgpu_device pointer
  * @vm: VM to clear BO from
- * @bo: BO to clear
+ * @vmbo: BO to clear
  * @immediate: use an immediate update
  *
  * Root PD needs to be reserved when calling this.
@@ -747,13 +748,14 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 			      struct amdgpu_vm *vm,
-			      struct amdgpu_bo *bo,
+			      struct amdgpu_bo_vm *vmbo,
 			      bool immediate)
 {
 	struct ttm_operation_ctx ctx = { true, false };
 	unsigned level = adev->vm_manager.root_level;
 	struct amdgpu_vm_update_params params;
-	struct amdgpu_bo *ancestor = bo;
+	struct amdgpu_bo *ancestor = &vmbo->bo;
+	struct amdgpu_bo *bo = &vmbo->bo;
 	unsigned entries, ats_entries;
 	uint64_t addr;
 	int r;
@@ -793,14 +795,15 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	if (bo->shadow) {
-		r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
-				    &ctx);
+	if (vmbo->shadow) {
+		struct amdgpu_bo *shadow = vmbo->shadow;
+
+		r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
 		if (r)
 			return r;
 	}
 
-	r = vm->update_funcs->map_table(bo);
+	r = vm->update_funcs->map_table(vmbo);
 	if (r)
 		return r;
@@ -824,7 +827,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 		amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
 	}
 
-	r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
+	r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries,
 				     value, flags);
 	if (r)
 		return r;
@@ -847,7 +850,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 		}
 	}
 
-	r = vm->update_funcs->update(&params, bo, addr, 0, entries,
+	r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
 				     value, flags);
 	if (r)
 		return r;
@@ -863,14 +866,16 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
  * @vm: requesting vm
  * @level: the page table level
  * @immediate: use a immediate update
- * @bo: pointer to the buffer object pointer
+ * @vmbo: pointer to the buffer object pointer
  */
 static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
 			       struct amdgpu_vm *vm,
 			       int level, bool immediate,
-			       struct amdgpu_bo **bo)
+			       struct amdgpu_bo_vm **vmbo)
 {
 	struct amdgpu_bo_param bp;
+	struct amdgpu_bo *bo;
+	struct dma_resv *resv;
 	int r;
 
 	memset(&bp, 0, sizeof(bp));
@@ -881,7 +886,7 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
 	bp.domain = amdgpu_bo_get_preferred_pin_domain(adev, bp.domain);
 	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
 		AMDGPU_GEM_CREATE_CPU_GTT_USWC;
-	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+	bp.bo_ptr_size = sizeof(struct amdgpu_bo_vm);
 	if (vm->use_cpu_for_update)
 		bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
@@ -890,26 +895,41 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
 	if (vm->root.base.bo)
 		bp.resv = vm->root.base.bo->tbo.base.resv;
 
-	r = amdgpu_bo_create(adev, &bp, bo);
+	r = amdgpu_bo_create_vm(adev, &bp, vmbo);
 	if (r)
 		return r;
 
-	if (vm->is_compute_context && (adev->flags & AMD_IS_APU))
+	bo = &(*vmbo)->bo;
+	if (vm->is_compute_context && (adev->flags & AMD_IS_APU)) {
+		(*vmbo)->shadow = NULL;
 		return 0;
+	}
 
 	if (!bp.resv)
-		WARN_ON(dma_resv_lock((*bo)->tbo.base.resv,
+		WARN_ON(dma_resv_lock(bo->tbo.base.resv,
 				      NULL));
-	r = amdgpu_bo_create_shadow(adev, bp.size, *bo);
+	resv = bp.resv;
+	memset(&bp, 0, sizeof(bp));
+	bp.size = amdgpu_vm_bo_size(adev, level);
+	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+	bp.type = ttm_bo_type_kernel;
+	bp.resv = bo->tbo.base.resv;
+	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
-	if (!bp.resv)
-		dma_resv_unlock((*bo)->tbo.base.resv);
+	r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
+	if (!resv)
+		dma_resv_unlock(bo->tbo.base.resv);
 
 	if (r) {
-		amdgpu_bo_unref(bo);
+		amdgpu_bo_unref(&bo);
 		return r;
 	}
 
+	(*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
+	amdgpu_bo_add_to_shadow_list((*vmbo)->shadow);
+
 	return 0;
 }
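The hunk above works because amdgpu_bo_create() allocates bp.bo_ptr_size bytes for the BO object, so setting it to sizeof(struct amdgpu_bo_vm) leaves room for the subclass around the embedded base BO. A hedged sketch of what the amdgpu_bo_create_vm() wrapper called here does (the real one lives in amdgpu_object.c; details may differ):

int amdgpu_bo_create_vm(struct amdgpu_device *adev,
                        struct amdgpu_bo_param *bp,
                        struct amdgpu_bo_vm **vmbo_ptr)
{
        struct amdgpu_bo *bo_ptr;
        int r;

        /* bo_ptr_size must already cover the whole subclass. */
        BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
        r = amdgpu_bo_create(adev, bp, &bo_ptr);
        if (r)
                return r;

        /* The base BO is the first member, so downcasting is safe. */
        *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
        return r;
}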
@@ -933,7 +953,8 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 			       bool immediate)
 {
 	struct amdgpu_vm_pt *entry = cursor->entry;
-	struct amdgpu_bo *pt;
+	struct amdgpu_bo *pt_bo;
+	struct amdgpu_bo_vm *pt;
 	int r;
 
 	if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
@@ -957,8 +978,9 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 	/* Keep a reference to the root directory to avoid
 	 * freeing them up in the wrong order.
 	 */
-	pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
-	amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+	pt_bo = &pt->bo;
+	pt_bo->parent = amdgpu_bo_ref(cursor->parent->base.bo);
+	amdgpu_vm_bo_base_init(&entry->base, vm, pt_bo);
 
 	r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
 	if (r)
@@ -968,7 +990,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 
 error_free_pt:
 	amdgpu_bo_unref(&pt->shadow);
-	amdgpu_bo_unref(&pt);
+	amdgpu_bo_unref(&pt_bo);
 	return r;
 }
@@ -979,10 +1001,13 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
 {
+	struct amdgpu_bo *shadow;
+
 	if (entry->base.bo) {
+		shadow = amdgpu_bo_shadowed(entry->base.bo);
 		entry->base.bo->vm_bo = NULL;
 		list_del(&entry->base.vm_status);
-		amdgpu_bo_unref(&entry->base.bo->shadow);
+		amdgpu_bo_unref(&shadow);
 		amdgpu_bo_unref(&entry->base.bo);
 	}
 	kvfree(entry->entries);
@@ -1284,7 +1309,8 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
 	level += params->adev->vm_manager.root_level;
 	amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
 	pde = (entry - parent->entries) * 8;
-	return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
+	return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
+					1, 0, flags);
 }
 
 /**
@@ -1364,9 +1390,9 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
  * Make sure to set the right flags for the PTEs at the desired level.
  */
 static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
-				   struct amdgpu_bo *bo, unsigned level,
+				   struct amdgpu_bo_vm *pt, unsigned int level,
 				   uint64_t pe, uint64_t addr,
-				   unsigned count, uint32_t incr,
+				   unsigned int count, uint32_t incr,
 				   uint64_t flags)
 {
@@ -1382,7 +1408,7 @@ static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
 		flags |= AMDGPU_PTE_EXECUTABLE;
 	}
 
-	params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
+	params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
 					 flags);
 }
@@ -1562,9 +1588,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
 						    nptes, dst, incr, upd_flags,
 						    vm->task_info.pid,
 						    vm->immediate.fence_context);
-			amdgpu_vm_update_flags(params, pt, cursor.level,
-					       pe_start, dst, nptes, incr,
-					       upd_flags);
+			amdgpu_vm_update_flags(params, to_amdgpu_bo_vm(pt),
+					       cursor.level, pe_start, dst,
+					       nptes, incr, upd_flags);
 
 			pe_start += nptes * 8;
 			dst += nptes * incr;
@@ -2674,7 +2700,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 	struct amdgpu_vm_bo_base *bo_base;
 
 	/* shadow bo doesn't have bo base, its validation needs its parent */
-	if (bo->parent && bo->parent->shadow == bo)
+	if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
 		bo = bo->parent;
 
 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
@@ -2843,7 +2869,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
  */
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
 {
-	struct amdgpu_bo *root;
+	struct amdgpu_bo *root_bo;
+	struct amdgpu_bo_vm *root;
 	int r, i;
 
 	vm->va = RB_ROOT_CACHED;
@@ -2897,16 +2924,16 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
 				false, &root);
 	if (r)
 		goto error_free_delayed;
-
-	r = amdgpu_bo_reserve(root, true);
+	root_bo = &root->bo;
+	r = amdgpu_bo_reserve(root_bo, true);
 	if (r)
 		goto error_free_root;
 
-	r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+	r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
 	if (r)
 		goto error_unreserve;
 
-	amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
+	amdgpu_vm_bo_base_init(&vm->root.base, vm, root_bo);
 
 	r = amdgpu_vm_clear_bo(adev, vm, root, false);
 	if (r)
@@ -2935,8 +2962,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
 	amdgpu_bo_unreserve(vm->root.base.bo);
 
 error_free_root:
-	amdgpu_bo_unref(&vm->root.base.bo->shadow);
-	amdgpu_bo_unref(&vm->root.base.bo);
+	amdgpu_bo_unref(&root->shadow);
+	amdgpu_bo_unref(&root_bo);
 	vm->root.base.bo = NULL;
 
 error_free_delayed:
@@ -3034,7 +3061,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	 */
 	if (pte_support_ats != vm->pte_support_ats) {
 		vm->pte_support_ats = pte_support_ats;
-		r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
+		r = amdgpu_vm_clear_bo(adev, vm,
+				       to_amdgpu_bo_vm(vm->root.base.bo),
+				       false);
 		if (r)
 			goto free_idr;
 	}
@@ -3078,7 +3107,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	}
 
 	/* Free the shadow bo for compute VM */
-	amdgpu_bo_unref(&vm->root.base.bo->shadow);
+	amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.base.bo)->shadow);
 
 	if (pasid)
 		vm->pasid = pasid;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -39,6 +39,7 @@
 struct amdgpu_bo_va;
 struct amdgpu_job;
 struct amdgpu_bo_list_entry;
+struct amdgpu_bo_vm;
 
 /*
  * GPUVM handling
@@ -239,11 +240,11 @@
 };
 
 struct amdgpu_vm_update_funcs {
-	int (*map_table)(struct amdgpu_bo *bo);
+	int (*map_table)(struct amdgpu_bo_vm *bo);
 	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
 		       enum amdgpu_sync_mode sync_mode);
 	int (*update)(struct amdgpu_vm_update_params *p,
-		      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
+		      struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
 		      unsigned count, uint32_t incr, uint64_t flags);
 	int (*commit)(struct amdgpu_vm_update_params *p,
 		      struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -29,9 +29,9 @@
  *
  * @table: newly allocated or validated PD/PT
  */
-static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
+static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
 {
-	return amdgpu_bo_kmap(table, NULL);
+	return amdgpu_bo_kmap(&table->bo, NULL);
 }
 
 /**
@@ -58,7 +58,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
  * amdgpu_vm_cpu_update - helper to update page tables via CPU
  *
  * @p: see amdgpu_vm_update_params definition
- * @bo: PD/PT to update
+ * @vmbo: PD/PT to update
  * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -68,7 +68,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
  * Write count number of PT/PD entries directly.
  */
 static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
-				struct amdgpu_bo *bo, uint64_t pe,
+				struct amdgpu_bo_vm *vmbo, uint64_t pe,
 				uint64_t addr, unsigned count, uint32_t incr,
 				uint64_t flags)
 {
@@ -76,13 +76,13 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
 	uint64_t value;
 	int r;
 
-	if (bo->tbo.moving) {
-		r = dma_fence_wait(bo->tbo.moving, true);
+	if (vmbo->bo.tbo.moving) {
+		r = dma_fence_wait(vmbo->bo.tbo.moving, true);
 		if (r)
 			return r;
 	}
 
-	pe += (unsigned long)amdgpu_bo_kptr(bo);
+	pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);
 
 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -33,11 +33,11 @@
  *
  * @table: newly allocated or validated PD/PT
  */
-static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
+static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
 {
 	int r;
 
-	r = amdgpu_ttm_alloc_gart(&table->tbo);
+	r = amdgpu_ttm_alloc_gart(&table->bo.tbo);
 	if (r)
 		return r;
@@ -186,7 +186,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
  * amdgpu_vm_sdma_update - execute VM update
  *
  * @p: see amdgpu_vm_update_params definition
- * @bo: PD/PT to update
+ * @vmbo: PD/PT to update
  * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -197,10 +197,11 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
  * the IB.
  */
 static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
-				 struct amdgpu_bo *bo, uint64_t pe,
+				 struct amdgpu_bo_vm *vmbo, uint64_t pe,
 				 uint64_t addr, unsigned count, uint32_t incr,
 				 uint64_t flags)
 {
+	struct amdgpu_bo *bo = &vmbo->bo;
 	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
 		: AMDGPU_IB_POOL_DELAYED;
 	unsigned int i, ndw, nptes;
@@ -238,8 +239,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 	if (!p->pages_addr) {
 		/* set page commands needed */
-		if (bo->shadow)
-			amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
+		if (vmbo->shadow)
+			amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe, addr,
 						count, incr, flags);
 		amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
 					incr, flags);
@@ -248,7 +249,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 	/* copy commands needed */
 	ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
-		(bo->shadow ? 2 : 1);
+		(vmbo->shadow ? 2 : 1);
 
 	/* for padding */
 	ndw -= 7;
@@ -263,8 +264,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 			pte[i] |= flags;
 		}
 
-		if (bo->shadow)
-			amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
+		if (vmbo->shadow)
+			amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes);
 		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
 
 		pe += nptes * 8;