Commit 58ab2c08 authored by Christian König, committed by Alex Deucher

drm/amdgpu: use VRAM|GTT for a bunch of kernel allocations

Technically all of those can use GTT as well, no need to force things
into VRAM.
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent 9c705b96
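For context (not part of the commit): amdgpu_bo_create_kernel() and amdgpu_bo_create_reserved() take a bitmask of allowed placement domains. When the mask contains both AMDGPU_GEM_DOMAIN_VRAM and AMDGPU_GEM_DOMAIN_GTT, the allocator prefers VRAM but can fall back to GTT instead of failing outright, which is the behavior this patch relies on. The sketch below is illustrative only: example_alloc_scratch() and example_free_scratch() are made-up names, while the create/free helpers and the domain flags are the existing driver API.

/* Illustration only -- not part of this commit. */
#include "amdgpu.h"
#include "amdgpu_object.h"

static int example_alloc_scratch(struct amdgpu_device *adev,
                struct amdgpu_bo **bo, u64 *gpu_addr, void **cpu_ptr)
{
        /*
         * Prefer VRAM, but also allow GTT so the allocation does not
         * hard-fail when little or no VRAM is usable.
         */
        return amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                        AMDGPU_GEM_DOMAIN_VRAM |
                        AMDGPU_GEM_DOMAIN_GTT,
                        bo, gpu_addr, cpu_ptr);
}

static void example_free_scratch(struct amdgpu_bo **bo,
                u64 *gpu_addr, void **cpu_ptr)
{
        /* Unpin, unmap and free the kernel BO created above. */
        amdgpu_bo_free_kernel(bo, gpu_addr, cpu_ptr);
}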
@@ -755,6 +755,11 @@ struct amdgpu_mqd {
 #define AMDGPU_PRODUCT_NAME_LEN 64
 struct amdgpu_reset_domain;
 
+/*
+ * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
+ */
+#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
+
 struct amdgpu_device {
         struct device *dev;
         struct pci_dev *pdev;
...
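A side note on the AMDGPU_HAS_VRAM() macro added above (again only a sketch, not taken from the patch): not every buffer gets the relaxed VRAM|GTT mask. The PSP TMR hunk further down instead picks exactly one domain up front, using GTT only when the device reports no real VRAM, roughly like this:

        /*
         * Hypothetical snippet (adev is a struct amdgpu_device *):
         * choose a single placement domain up front -- VRAM if the
         * device has any, GTT otherwise.
         */
        u32 domain = AMDGPU_HAS_VRAM(adev) ? AMDGPU_GEM_DOMAIN_VRAM :
                        AMDGPU_GEM_DOMAIN_GTT;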
@@ -934,7 +934,8 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
 static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
 {
         return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
-                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+                        PAGE_SIZE,
+                        AMDGPU_GEM_DOMAIN_VRAM,
                         &adev->vram_scratch.robj,
                         &adev->vram_scratch.gpu_addr,
                         (void **)&adev->vram_scratch.ptr);
@@ -2410,7 +2411,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
                 /* right after GMC hw init, we create CSA */
                 if (amdgpu_mcbp) {
                         r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
-                                        AMDGPU_GEM_DOMAIN_VRAM,
+                                        AMDGPU_GEM_DOMAIN_VRAM |
+                                        AMDGPU_GEM_DOMAIN_GTT,
                                         AMDGPU_CSA_SIZE);
                         if (r) {
                                 DRM_ERROR("allocate CSA failed %d\n", r);
...
@@ -372,8 +372,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
          * KIQ MQD no matter SRIOV or Bare-metal
          */
         r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
-                        AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
-                        &ring->mqd_gpu_addr, &ring->mqd_ptr);
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
+                        &ring->mqd_obj,
+                        &ring->mqd_gpu_addr,
+                        &ring->mqd_ptr);
         if (r) {
                 dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
                 return r;
...
@@ -66,7 +66,8 @@ static int psp_ring_init(struct psp_context *psp,
         /* allocate 4k Page of Local Frame Buffer memory for ring */
         ring->ring_size = 0x1000;
         ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
-                        AMDGPU_GEM_DOMAIN_VRAM,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->firmware.rbuf,
                         &ring->ring_mem_mc_addr,
                         (void **)&ring->ring_mem);
@@ -797,9 +798,13 @@ static int psp_tmr_init(struct psp_context *psp)
         if (!psp->tmr_bo) {
                 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
-                ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
-                                AMDGPU_GEM_DOMAIN_VRAM,
-                                &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+                ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
+                                PSP_TMR_ALIGNMENT,
+                                AMDGPU_HAS_VRAM(psp->adev) ?
+                                AMDGPU_GEM_DOMAIN_VRAM :
+                                AMDGPU_GEM_DOMAIN_GTT,
+                                &psp->tmr_bo, &psp->tmr_mc_addr,
+                                pptr);
         }
 
         return ret;
@@ -1092,7 +1097,8 @@ int psp_ta_init_shared_buf(struct psp_context *psp,
          * physical) for ta to host memory
          */
         return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
-                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &mem_ctx->shared_bo,
                         &mem_ctx->shared_mc_addr,
                         &mem_ctx->shared_buf);
@@ -3444,9 +3450,9 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
         /* LFB address which is aligned to 1MB boundary per PSP request */
         ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
-                        AMDGPU_GEM_DOMAIN_VRAM,
-                        &fw_buf_bo,
-                        &fw_pri_mc_addr,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
+                        &fw_buf_bo, &fw_pri_mc_addr,
                         &fw_pri_cpu_addr);
         if (ret)
                 goto rel_buf;
...
@@ -93,7 +93,8 @@ int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
         /* allocate save restore block */
         r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                        AMDGPU_GEM_DOMAIN_VRAM,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->gfx.rlc.save_restore_obj,
                         &adev->gfx.rlc.save_restore_gpu_addr,
                         (void **)&adev->gfx.rlc.sr_ptr);
@@ -130,7 +131,8 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
         /* allocate clear state block */
         adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
         r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
-                        AMDGPU_GEM_DOMAIN_VRAM,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->gfx.rlc.clear_state_obj,
                         &adev->gfx.rlc.clear_state_gpu_addr,
                         (void **)&adev->gfx.rlc.cs_ptr);
@@ -156,7 +158,8 @@ int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
         int r;
 
         r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->gfx.rlc.cp_table_obj,
                         &adev->gfx.rlc.cp_table_gpu_addr,
                         (void **)&adev->gfx.rlc.cp_table_ptr);
...
@@ -331,8 +331,11 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
                 if (adev->uvd.harvest_config & (1 << j))
                         continue;
                 r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
-                                AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
-                                &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
+                                AMDGPU_GEM_DOMAIN_VRAM |
+                                AMDGPU_GEM_DOMAIN_GTT,
+                                &adev->uvd.inst[j].vcpu_bo,
+                                &adev->uvd.inst[j].gpu_addr,
+                                &adev->uvd.inst[j].cpu_addr);
                 if (r) {
                         dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
                         return r;
...
@@ -186,7 +186,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
                 (binary_id << 8));
         r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
-                        AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
+                        &adev->vce.vcpu_bo,
                         &adev->vce.gpu_addr, &adev->vce.cpu_addr);
         if (r) {
                 dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
...
@@ -274,8 +274,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
                         continue;
                 r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
-                                AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
-                                &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
+                                AMDGPU_GEM_DOMAIN_VRAM |
+                                AMDGPU_GEM_DOMAIN_GTT,
+                                &adev->vcn.inst[i].vcpu_bo,
+                                &adev->vcn.inst[i].gpu_addr,
+                                &adev->vcn.inst[i].cpu_addr);
                 if (r) {
                         dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
                         return r;
@@ -296,8 +299,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
                 if (adev->vcn.indirect_sram) {
                         r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
-                                        AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
-                                        &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
+                                        AMDGPU_GEM_DOMAIN_VRAM |
+                                        AMDGPU_GEM_DOMAIN_GTT,
+                                        &adev->vcn.inst[i].dpg_sram_bo,
+                                        &adev->vcn.inst[i].dpg_sram_gpu_addr,
+                                        &adev->vcn.inst[i].dpg_sram_cpu_addr);
                         if (r) {
                                 dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
                                 return r;
...
@@ -232,7 +232,8 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
                 return 0;
         r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
-                        AMDGPU_GEM_DOMAIN_VRAM,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->virt.mm_table.bo,
                         &adev->virt.mm_table.gpu_addr,
                         (void *)&adev->virt.mm_table.cpu_addr);
...
@@ -987,7 +987,8 @@ static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
         total_size = gfx_v11_0_calc_toc_total_size(adev);
         r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
-                        AMDGPU_GEM_DOMAIN_VRAM,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->gfx.rlc.rlc_autoload_bo,
                         &adev->gfx.rlc.rlc_autoload_gpu_addr,
                         (void **)&adev->gfx.rlc.rlc_autoload_ptr);
@@ -2649,7 +2650,9 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
         /* 64kb align */
         r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
-                        64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+                        64 * 1024,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->gfx.pfp.pfp_fw_obj,
                         &adev->gfx.pfp.pfp_fw_gpu_addr,
                         (void **)&adev->gfx.pfp.pfp_fw_ptr);
@@ -2660,7 +2663,9 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
         }
         r = amdgpu_bo_create_reserved(adev, fw_data_size,
-                        64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+                        64 * 1024,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->gfx.pfp.pfp_fw_data_obj,
                         &adev->gfx.pfp.pfp_fw_data_gpu_addr,
                         (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
@@ -2863,7 +2868,9 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
         /* 64kb align*/
         r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
-                        64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+                        64 * 1024,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->gfx.me.me_fw_obj,
                         &adev->gfx.me.me_fw_gpu_addr,
                         (void **)&adev->gfx.me.me_fw_ptr);
@@ -2874,7 +2881,9 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
         }
         r = amdgpu_bo_create_reserved(adev, fw_data_size,
-                        64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+                        64 * 1024,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->gfx.me.me_fw_data_obj,
                         &adev->gfx.me.me_fw_data_gpu_addr,
                         (void **)&adev->gfx.me.me_fw_data_ptr);
@@ -3380,7 +3389,9 @@ static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
         fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
         r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
-                        64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+                        64 * 1024,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->gfx.mec.mec_fw_obj,
                         &adev->gfx.mec.mec_fw_gpu_addr,
                         (void **)&fw_ucode_ptr);
@@ -3391,7 +3402,9 @@ static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
         }
         r = amdgpu_bo_create_reserved(adev, fw_data_size,
-                        64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+                        64 * 1024,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->gfx.mec.mec_fw_data_obj,
                         &adev->gfx.mec.mec_fw_data_gpu_addr,
                         (void **)&fw_data_ptr);
...
@@ -2375,7 +2375,8 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
                 dws = adev->gfx.rlc.clear_state_size + (256 / 4);
                 r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                AMDGPU_GEM_DOMAIN_VRAM,
+                                AMDGPU_GEM_DOMAIN_VRAM |
+                                AMDGPU_GEM_DOMAIN_GTT,
                                 &adev->gfx.rlc.clear_state_obj,
                                 &adev->gfx.rlc.clear_state_gpu_addr,
                                 (void **)&adev->gfx.rlc.cs_ptr);
...
@@ -2772,7 +2772,8 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
                 * GFX7_MEC_HPD_SIZE * 2;
         r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-                        AMDGPU_GEM_DOMAIN_VRAM,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->gfx.mec.hpd_eop_obj,
                         &adev->gfx.mec.hpd_eop_gpu_addr,
                         (void **)&hpd);
...
@@ -1340,7 +1340,8 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
         mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
         if (mec_hpd_size) {
                 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-                                AMDGPU_GEM_DOMAIN_VRAM,
+                                AMDGPU_GEM_DOMAIN_VRAM |
+                                AMDGPU_GEM_DOMAIN_GTT,
                                 &adev->gfx.mec.hpd_eop_obj,
                                 &adev->gfx.mec.hpd_eop_gpu_addr,
                                 (void **)&hpd);
...
@@ -1783,7 +1783,8 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
         mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
         if (mec_hpd_size) {
                 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
-                                AMDGPU_GEM_DOMAIN_VRAM,
+                                AMDGPU_GEM_DOMAIN_VRAM |
+                                AMDGPU_GEM_DOMAIN_GTT,
                                 &adev->gfx.mec.hpd_eop_obj,
                                 &adev->gfx.mec.hpd_eop_gpu_addr,
                                 (void **)&hpd);
...
@@ -549,7 +549,9 @@ static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
         fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
         r = amdgpu_bo_create_reserved(adev, fw_size,
-                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+                        PAGE_SIZE,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->mes.ucode_fw_obj[pipe],
                         &adev->mes.ucode_fw_gpu_addr[pipe],
                         (void **)&adev->mes.ucode_fw_ptr[pipe]);
@@ -582,7 +584,9 @@ static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
         fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
         r = amdgpu_bo_create_reserved(adev, fw_size,
-                        64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+                        64 * 1024,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
                         &adev->mes.data_fw_obj[pipe],
                         &adev->mes.data_fw_gpu_addr[pipe],
                         (void **)&adev->mes.data_fw_ptr[pipe]);
...
@@ -2085,7 +2085,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
          * TODO: Move this into GART.
          */
         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
-                        AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
+                        AMDGPU_GEM_DOMAIN_VRAM |
+                        AMDGPU_GEM_DOMAIN_GTT,
+                        &adev->dm.dmub_bo,
                         &adev->dm.dmub_bo_gpu_addr,
                         &adev->dm.dmub_bo_cpu_addr);
         if (r)
...
@@ -250,9 +250,8 @@ static int smu10_smu_init(struct pp_hwmgr *hwmgr)
         /* allocate space for watermarks table */
         r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
-                        sizeof(Watermarks_t),
-                        PAGE_SIZE,
-                        AMDGPU_GEM_DOMAIN_VRAM,
+                        sizeof(Watermarks_t), PAGE_SIZE,
+                        AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
                         &priv->smu_tables.entry[SMU10_WMTABLE].handle,
                         &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
                         &priv->smu_tables.entry[SMU10_WMTABLE].table);
@@ -266,9 +265,8 @@ static int smu10_smu_init(struct pp_hwmgr *hwmgr)
         /* allocate space for watermarks table */
         r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
-                        sizeof(DpmClocks_t),
-                        PAGE_SIZE,
-                        AMDGPU_GEM_DOMAIN_VRAM,
+                        sizeof(DpmClocks_t), PAGE_SIZE,
+                        AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
                         &priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
                         &priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
                         &priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
...