Commit eab3de23 authored by Christian König, committed by Alex Deucher

drm/amdgpu: explicitly give BO type to amdgpu_bo_create

Drop the "kernel" and sg parameter and give the BO type to create
explicit to amdgpu_bo_create instead of figuring it out from the
parameters.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent e3364dfc
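
For quick reference, here is the new amdgpu_bo_create() prototype (copied from the amdgpu_object.h hunk below) together with a minimal, hypothetical caller that shows how the old "bool kernel"/"struct sg_table *sg" arguments map onto the explicit ttm_bo_type. The wrapper name example_alloc_gtt_bo and the sizes used here are made up for illustration only and are not part of this patch.

#include "amdgpu.h"  /* driver-internal header, assumed to provide the amdgpu/ttm types */

/* New prototype after this patch (see the amdgpu_object.h hunk below). */
int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
                     int byte_align, u32 domain,
                     u64 flags, enum ttm_bo_type type,
                     struct reservation_object *resv,
                     struct amdgpu_bo **bo_ptr);

/*
 * Old -> new mapping of the dropped parameters (from the removed logic in
 * amdgpu_bo_do_create below):
 *   kernel == true                -> ttm_bo_type_kernel
 *   kernel == false, sg == NULL   -> ttm_bo_type_device
 *   kernel == false, sg != NULL   -> ttm_bo_type_sg (the caller now assigns
 *                                    the sg table itself, see the
 *                                    amdgpu_gem_prime_import_sg_table hunk)
 */
static int example_alloc_gtt_bo(struct amdgpu_device *adev,
                                struct amdgpu_bo **bo)
{
        /* was: amdgpu_bo_create(adev, PAGE_SIZE, PAGE_SIZE, true,
         *                       AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo); */
        return amdgpu_bo_create(adev, PAGE_SIZE, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_GTT, 0,
                                ttm_bo_type_kernel, NULL, bo);
}

Passing the type explicitly also lets a caller request ttm_bo_type_sg while attaching the sg table itself, rather than having the allocator infer the type from which optional parameters happen to be non-NULL.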
......@@ -441,7 +441,7 @@ struct amdgpu_sa_bo {
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
- u64 flags, bool kernel,
+ u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
struct drm_gem_object **obj);
......
......@@ -221,8 +221,9 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
uint64_t gpu_addr_tmp = 0;
void *cpu_ptr_tmp = NULL;
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &bo);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel,
+ NULL, &bo);
if (r) {
dev_err(adev->dev,
"failed to allocate BO for amdkfd (%d)\n", r);
......
......@@ -997,8 +997,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
va, size, domain_string(alloc_domain));
- ret = amdgpu_bo_create(adev, size, byte_align, false,
- alloc_domain, alloc_flags, NULL, NULL, &bo);
+ ret = amdgpu_bo_create(adev, size, byte_align,
+ alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
domain_string(alloc_domain), ret);
......
......@@ -80,8 +80,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
int time;
n = AMDGPU_BENCHMARK_ITERATIONS;
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
- NULL, &sobj);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, sdomain, 0,
+ ttm_bo_type_kernel, NULL, &sobj);
if (r) {
goto out_cleanup;
}
......@@ -93,8 +93,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
if (r) {
goto out_cleanup;
}
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
- NULL, &dobj);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0,
+ ttm_bo_type_kernel, NULL, &dobj);
if (r) {
goto out_cleanup;
}
......
......@@ -113,11 +113,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
int r;
if (adev->gart.robj == NULL) {
- r = amdgpu_bo_create(adev, adev->gart.table_size,
- PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+ r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->gart.robj);
+ ttm_bo_type_kernel, NULL,
+ &adev->gart.robj);
if (r) {
return r;
}
......
......@@ -45,7 +45,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
- u64 flags, bool kernel,
+ u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
struct drm_gem_object **obj)
{
......@@ -59,8 +59,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
}
retry:
- r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
- flags, NULL, resv, &bo);
+ r = amdgpu_bo_create(adev, size, alignment, initial_domain,
+ flags, type, resv, &bo);
if (r) {
if (r != -ERESTARTSYS) {
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
......
......@@ -191,10 +191,10 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
int r;
if (!*bo_ptr) {
- r = amdgpu_bo_create(adev, size, align, true, domain,
+ r = amdgpu_bo_create(adev, size, align, domain,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, bo_ptr);
+ ttm_bo_type_kernel, NULL, bo_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
r);
......@@ -335,21 +335,19 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
return false;
}
- static int amdgpu_bo_do_create(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
+ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size,
+ int byte_align, u32 domain,
+ u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
struct amdgpu_bo **bo_ptr)
{
struct ttm_operation_ctx ctx = {
- .interruptible = !kernel,
+ .interruptible = (type != ttm_bo_type_kernel),
.no_wait_gpu = false,
.resv = resv,
.flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
};
struct amdgpu_bo *bo;
- enum ttm_bo_type type;
unsigned long page_align;
size_t acc_size;
int r;
......@@ -360,13 +358,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
if (!amdgpu_bo_validate_size(adev, size, domain))
return -ENOMEM;
- if (kernel) {
- type = ttm_bo_type_kernel;
- } else if (sg) {
- type = ttm_bo_type_sg;
- } else {
- type = ttm_bo_type_device;
- }
*bo_ptr = NULL;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
......@@ -385,7 +376,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
AMDGPU_GEM_DOMAIN_GWS |
AMDGPU_GEM_DOMAIN_OA);
bo->allowed_domains = bo->preferred_domains;
- if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+ if (type != ttm_bo_type_kernel &&
+ bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
bo->flags = flags;
......@@ -423,7 +415,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, &ctx, acc_size,
- sg, resv, &amdgpu_ttm_bo_destroy);
+ NULL, resv, &amdgpu_ttm_bo_destroy);
if (unlikely(r != 0))
return r;
......@@ -435,7 +427,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
else
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
- if (kernel)
+ if (type == ttm_bo_type_kernel)
bo->tbo.priority = 1;
if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
......@@ -479,12 +471,11 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
if (bo->shadow)
return 0;
- r = amdgpu_bo_do_create(adev, size, byte_align, true,
- AMDGPU_GEM_DOMAIN_GTT,
+ r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW,
- NULL, bo->tbo.resv,
- &bo->shadow);
+ ttm_bo_type_kernel,
+ bo->tbo.resv, &bo->shadow);
if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo);
mutex_lock(&adev->shadow_list_lock);
......@@ -495,18 +486,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
return r;
}
- int amdgpu_bo_create(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
+ int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+ int byte_align, u32 domain,
+ u64 flags, enum ttm_bo_type type,
struct reservation_object *resv,
struct amdgpu_bo **bo_ptr)
{
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r;
- r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
- parent_flags, sg, resv, bo_ptr);
+ r = amdgpu_bo_do_create(adev, size, byte_align, domain,
+ parent_flags, type, resv, bo_ptr);
if (r)
return r;
......
......@@ -203,12 +203,11 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
- int amdgpu_bo_create(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct reservation_object *resv,
- struct amdgpu_bo **bo_ptr);
+ int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size,
+ int byte_align, u32 domain,
+ u64 flags, enum ttm_bo_type type,
+ struct reservation_object *resv,
+ struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
......
......@@ -105,11 +105,14 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
int ret;
ww_mutex_lock(&resv->lock, NULL);
- ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
- AMDGPU_GEM_DOMAIN_CPU, 0, sg, resv, &bo);
+ ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
+ resv, &bo);
if (ret)
goto error;
+ bo->tbo.sg = sg;
+ bo->tbo.ttm->sg = sg;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
......
......@@ -59,9 +59,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_cleanup;
}
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- NULL, NULL, &vram_obj);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 0,
+ ttm_bo_type_kernel, NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
......@@ -80,9 +79,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
void **vram_start, **vram_end;
struct dma_fence *fence = NULL;
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
- NULL, gtt_obj + i);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, 0,
+ ttm_bo_type_kernel, NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
......
......@@ -1342,11 +1342,12 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
if (adev->fw_vram_usage.size > 0 &&
adev->fw_vram_usage.size <= vram_size) {
- r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
- PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL,
- &adev->fw_vram_usage.reserved_bo);
+ r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+ ttm_bo_type_kernel, NULL,
+ &adev->fw_vram_usage.reserved_bo);
if (r)
goto error_create;
......
......@@ -413,9 +413,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
if (!entry->base.bo) {
r = amdgpu_bo_create(adev,
amdgpu_vm_bo_size(adev, level),
- AMDGPU_GPU_PAGE_SIZE, true,
+ AMDGPU_GPU_PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, flags,
- NULL, resv, &pt);
+ ttm_bo_type_kernel, resv, &pt);
if (r)
return r;
......@@ -2409,8 +2409,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
AMDGPU_GEM_CREATE_SHADOW);
size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
- r = amdgpu_bo_create(adev, size, align, true, AMDGPU_GEM_DOMAIN_VRAM,
- flags, NULL, NULL, &vm->root.base.bo);
+ r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags,
+ ttm_bo_type_kernel, NULL, &vm->root.base.bo);
if (r)
goto error_free_sched_entity;
......