Commit dd41fb85 authored by Dave Airlie

Merge branch 'drm-next-4.18' of git://people.freedesktop.org/~agd5f/linux into drm-next

Last feature request for 4.18.  Mostly vega20 support.
- Vega20 support
- clock and powergating for VCN
- misc bug fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180524152427.32713-1-alexander.deucher@amd.com
@@ -62,11 +62,13 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
 amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
 amdgpu-y += \
-    vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
+    vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
+    vega20_reg_init.o
 # add DF block
 amdgpu-y += \
-    df_v1_7.o
+    df_v1_7.o \
+    df_v3_6.o
 # add GMC block
 amdgpu-y += \
...
@@ -1401,6 +1401,8 @@ struct amdgpu_df_funcs {
                       bool enable);
     void (*get_clockgating_state)(struct amdgpu_device *adev,
                       u32 *flags);
+    void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
+                      bool enable);
 };
 /* Define the HW IP blocks will be used in driver , add more if necessary */
 enum amd_hw_ip_block_type {
...
@@ -322,3 +322,47 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
     return ret;
 }
union gfx_info {
struct atom_gfx_info_v2_4 v24;
};
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index;
uint8_t frev, crev;
uint16_t data_offset;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
gfx_info);
if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
union gfx_info *gfx_info = (union gfx_info *)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 4:
adev->gfx.config.max_shader_engines = gfx_info->v24.gc_num_se;
adev->gfx.config.max_cu_per_sh = gfx_info->v24.gc_num_cu_per_sh;
adev->gfx.config.max_sh_per_se = gfx_info->v24.gc_num_sh_per_se;
adev->gfx.config.max_backends_per_se = gfx_info->v24.gc_num_rb_per_se;
adev->gfx.config.max_texture_channel_caches = gfx_info->v24.gc_num_tccs;
adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
adev->gfx.config.gs_prim_buffer_depth =
le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
adev->gfx.config.double_offchip_lds_buf =
gfx_info->v24.gc_double_offchip_lds_buffer;
adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
return 0;
default:
return -EINVAL;
}
}
return -EINVAL;
}
@@ -30,5 +30,6 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
 #endif
@@ -400,6 +400,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
         case CHIP_VEGA12:
             strcpy(fw_name, "amdgpu/vega12_smc.bin");
             break;
+        case CHIP_VEGA20:
+            strcpy(fw_name, "amdgpu/vega20_smc.bin");
+            break;
         default:
             DRM_ERROR("SMC firmware not supported\n");
             return -EINVAL;
...
@@ -173,9 +173,14 @@ static void amdgpu_ctx_do_release(struct kref *ref)
     ctx = container_of(ref, struct amdgpu_ctx, refcount);
-    for (i = 0; i < ctx->adev->num_rings; i++)
+    for (i = 0; i < ctx->adev->num_rings; i++) {
+        if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+            continue;
         drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
             &ctx->rings[i].entity);
+    }
     amdgpu_ctx_fini(ref);
 }
@@ -452,12 +457,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
         if (!ctx->adev)
             return;
-        for (i = 0; i < ctx->adev->num_rings; i++)
+        for (i = 0; i < ctx->adev->num_rings; i++) {
+            if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+                continue;
             if (kref_read(&ctx->refcount) == 1)
                 drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
                     &ctx->rings[i].entity);
             else
                 DRM_ERROR("ctx %p is still alive\n", ctx);
+        }
     }
 }
@@ -474,12 +484,17 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
         if (!ctx->adev)
             return;
-        for (i = 0; i < ctx->adev->num_rings; i++)
+        for (i = 0; i < ctx->adev->num_rings; i++) {
+            if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
+                continue;
             if (kref_read(&ctx->refcount) == 1)
                 drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
                     &ctx->rings[i].entity);
             else
                 DRM_ERROR("ctx %p is still alive\n", ctx);
+        }
     }
 }
...
@@ -86,6 +86,7 @@ static const char *amdgpu_asic_name[] = {
     "VEGAM",
     "VEGA10",
     "VEGA12",
+    "VEGA20",
     "RAVEN",
     "LAST",
 };
@@ -1387,6 +1388,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
     case CHIP_KABINI:
     case CHIP_MULLINS:
 #endif
+    case CHIP_VEGA20:
     default:
         return 0;
     case CHIP_VEGA10:
@@ -1521,6 +1523,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 #endif
     case CHIP_VEGA10:
     case CHIP_VEGA12:
+    case CHIP_VEGA20:
     case CHIP_RAVEN:
         if (adev->asic_type == CHIP_RAVEN)
             adev->family = AMDGPU_FAMILY_RV;
@@ -1715,6 +1718,7 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
         /* skip CG for VCE/UVD, it's handled specially */
         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
             adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+            adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
             adev->ip_blocks[i].version->funcs->set_clockgating_state) {
             /* enable clockgating to save power */
             r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@@ -1814,6 +1818,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
         if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
             adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+            adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
             adev->ip_blocks[i].version->funcs->set_clockgating_state) {
             /* ungate blocks before hw fini so that we can shutdown the blocks safely */
             r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@@ -2155,6 +2160,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
     case CHIP_FIJI:
     case CHIP_VEGA10:
     case CHIP_VEGA12:
+    case CHIP_VEGA20:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
     case CHIP_RAVEN:
 #endif
@@ -3172,7 +3178,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                   struct amdgpu_job *job, bool force)
 {
-    struct drm_atomic_state *state = NULL;
     int i, r, resched;
     if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
@@ -3195,10 +3200,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
     /* block TTM */
     resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
-    /* store modesetting */
-    if (amdgpu_device_has_dc_support(adev))
-        state = drm_atomic_helper_suspend(adev->ddev);
     /* block all schedulers and reset given job's ring */
     for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
         struct amdgpu_ring *ring = adev->rings[i];
@@ -3238,10 +3239,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
         kthread_unpark(ring->sched.thread);
     }
-    if (amdgpu_device_has_dc_support(adev)) {
-        if (drm_atomic_helper_resume(adev->ddev, state))
-            dev_info(adev->dev, "drm resume failed:%d\n", r);
-    } else {
+    if (!amdgpu_device_has_dc_support(adev)) {
         drm_helper_resume_force_mode(adev->ddev);
     }
...
@@ -560,6 +560,13 @@ static const struct pci_device_id pciidlist[] = {
     {0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
     {0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
     {0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+    /* Vega 20 */
+    {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+    {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+    {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+    {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+    {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
+    {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT},
     /* Raven */
     {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
...
@@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
     struct amdgpu_device *adev = ring->adev;
     uint64_t index;
-    if (ring != &adev->uvd.ring) {
+    if (ring != &adev->uvd.inst[ring->me].ring) {
         ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
         ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
     } else {
         /* put fence directly behind firmware */
         index = ALIGN(adev->uvd.fw->size, 8);
-        ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
-        ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
+        ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
+        ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
     }
     amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
     amdgpu_irq_get(adev, irq_src, irq_type);
...
@@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
     struct drm_crtc *crtc;
     uint32_t ui32 = 0;
     uint64_t ui64 = 0;
-    int i, found;
+    int i, j, found;
     int ui32_size = sizeof(ui32);
     if (!info->return_size || !info->return_pointer)
@@ -348,7 +348,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
         break;
     case AMDGPU_HW_IP_UVD:
         type = AMD_IP_BLOCK_TYPE_UVD;
-        ring_mask = adev->uvd.ring.ready ? 1 : 0;
+        for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+            ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
         ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
         ib_size_alignment = 16;
         break;
@@ -361,8 +362,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
         break;
     case AMDGPU_HW_IP_UVD_ENC:
         type = AMD_IP_BLOCK_TYPE_UVD;
-        for (i = 0; i < adev->uvd.num_enc_rings; i++)
-            ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i);
+        for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+            for (j = 0; j < adev->uvd.num_enc_rings; j++)
+                ring_mask |=
+                ((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
+                (j + i * adev->uvd.num_enc_rings));
         ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
         ib_size_alignment = 1;
         break;
...
@@ -52,6 +52,7 @@ static int psp_sw_init(void *handle)
     switch (adev->asic_type) {
     case CHIP_VEGA10:
     case CHIP_VEGA12:
+    case CHIP_VEGA20:
         psp_v3_1_set_psp_funcs(psp);
         break;
     case CHIP_RAVEN:
...
@@ -66,6 +66,8 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
                    u32 ring,
                    struct amdgpu_ring **out_ring)
 {
+    u32 instance;
     switch (mapper->hw_ip) {
     case AMDGPU_HW_IP_GFX:
         *out_ring = &adev->gfx.gfx_ring[ring];
@@ -77,13 +79,16 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
         *out_ring = &adev->sdma.instance[ring].ring;
         break;
     case AMDGPU_HW_IP_UVD:
-        *out_ring = &adev->uvd.ring;
+        instance = ring;
+        *out_ring = &adev->uvd.inst[instance].ring;
         break;
     case AMDGPU_HW_IP_VCE:
         *out_ring = &adev->vce.ring[ring];
         break;
     case AMDGPU_HW_IP_UVD_ENC:
-        *out_ring = &adev->uvd.ring_enc[ring];
+        instance = ring / adev->uvd.num_enc_rings;
+        *out_ring =
+        &adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
         break;
     case AMDGPU_HW_IP_VCN_DEC:
         *out_ring = &adev->vcn.ring_dec;
@@ -240,13 +245,14 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
         ip_num_rings = adev->sdma.num_instances;
         break;
     case AMDGPU_HW_IP_UVD:
-        ip_num_rings = 1;
+        ip_num_rings = adev->uvd.num_uvd_inst;
         break;
     case AMDGPU_HW_IP_VCE:
         ip_num_rings = adev->vce.num_rings;
         break;
     case AMDGPU_HW_IP_UVD_ENC:
-        ip_num_rings = adev->uvd.num_enc_rings;
+        ip_num_rings =
+            adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
         break;
     case AMDGPU_HW_IP_VCN_DEC:
         ip_num_rings = 1;
...
@@ -362,6 +362,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
     dma_fence_put(ring->vmid_wait);
     ring->vmid_wait = NULL;
+    ring->me = 0;
     ring->adev->rings[ring->idx] = NULL;
 }
...
@@ -29,7 +29,7 @@
 #include <drm/drm_print.h>
 /* max number of rings */
-#define AMDGPU_MAX_RINGS		18
+#define AMDGPU_MAX_RINGS		21
 #define AMDGPU_MAX_GFX_RINGS		1
 #define AMDGPU_MAX_COMPUTE_RINGS	8
 #define AMDGPU_MAX_VCE_RINGS		3
...
@@ -307,6 +307,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
             return AMDGPU_FW_LOAD_DIRECT;
         else
             return AMDGPU_FW_LOAD_PSP;
+    case CHIP_VEGA20:
+        return AMDGPU_FW_LOAD_DIRECT;
     default:
         DRM_ERROR("Unknown firmware load type\n");
     }
...
@@ -70,12 +70,14 @@
 #define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
 #define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
+#define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"
-#define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00)
-#define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00)
-#define mmUVD_GPCOM_VCPU_CMD_VEGA10 (0x03c3 + 0x7e00)
-#define mmUVD_NO_OP_VEGA10 (0x03ff + 0x7e00)
-#define mmUVD_ENGINE_CNTL_VEGA10 (0x03c6 + 0x7e00)
+/* These are common relative offsets for all asics, from uvd_7_0_offset.h, */
+#define UVD_GPCOM_VCPU_CMD	0x03c3
+#define UVD_GPCOM_VCPU_DATA0	0x03c4
+#define UVD_GPCOM_VCPU_DATA1	0x03c5
+#define UVD_NO_OP		0x03ff
+#define UVD_BASE_SI		0x3800
 /**
  * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -114,6 +116,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM);
 MODULE_FIRMWARE(FIRMWARE_VEGA10);
 MODULE_FIRMWARE(FIRMWARE_VEGA12);
+MODULE_FIRMWARE(FIRMWARE_VEGA20);
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@@ -125,9 +128,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
     const char *fw_name;
     const struct common_firmware_header *hdr;
     unsigned version_major, version_minor, family_id;
-    int i, r;
+    int i, j, r;
-    INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
+    INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
     switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -177,6 +180,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
     case CHIP_VEGAM:
         fw_name = FIRMWARE_VEGAM;
         break;
+    case CHIP_VEGA20:
+        fw_name = FIRMWARE_VEGA20;
+        break;
     default:
         return -EINVAL;
     }
@@ -231,28 +237,30 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
     if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
         bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
-    r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
-                    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
-                    &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
-    if (r) {
-        dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
-        return r;
-    }
-    ring = &adev->uvd.ring;
-    rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-    r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
-                  rq, NULL);
-    if (r != 0) {
-        DRM_ERROR("Failed setting up UVD run queue.\n");
-        return r;
-    }
-    for (i = 0; i < adev->uvd.max_handles; ++i) {
-        atomic_set(&adev->uvd.handles[i], 0);
-        adev->uvd.filp[i] = NULL;
-    }
+    for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+        r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+                        AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
+                        &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
+        if (r) {
+            dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
+            return r;
+        }
+        ring = &adev->uvd.inst[j].ring;
+        rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+        r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
+                      rq, NULL);
+        if (r != 0) {
+            DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
+            return r;
+        }
+        for (i = 0; i < adev->uvd.max_handles; ++i) {
+            atomic_set(&adev->uvd.inst[j].handles[i], 0);
+            adev->uvd.inst[j].filp[i] = NULL;
+        }
+    }
     /* from uvd v5.0 HW addressing capacity increased to 64 bits */
     if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
         adev->uvd.address_64_bit = true;
@@ -279,20 +287,22 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
-    int i;
+    int i, j;
-    kfree(adev->uvd.saved_bo);
-    drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
-    amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
-                  &adev->uvd.gpu_addr,
-                  (void **)&adev->uvd.cpu_addr);
-    amdgpu_ring_fini(&adev->uvd.ring);
-    for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
-        amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+    for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+        kfree(adev->uvd.inst[j].saved_bo);
+        drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
+        amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
+                      &adev->uvd.inst[j].gpu_addr,
+                      (void **)&adev->uvd.inst[j].cpu_addr);
+        amdgpu_ring_fini(&adev->uvd.inst[j].ring);
+        for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
+            amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
+    }
     release_firmware(adev->uvd.fw);
     return 0;
@@ -302,32 +312,33 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
     unsigned size;
     void *ptr;
-    int i;
+    int i, j;
-    if (adev->uvd.vcpu_bo == NULL)
-        return 0;
-    cancel_delayed_work_sync(&adev->uvd.idle_work);
-    /* only valid for physical mode */
-    if (adev->asic_type < CHIP_POLARIS10) {
-        for (i = 0; i < adev->uvd.max_handles; ++i)
-            if (atomic_read(&adev->uvd.handles[i]))
-                break;
-        if (i == adev->uvd.max_handles)
-            return 0;
-    }
-    size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-    ptr = adev->uvd.cpu_addr;
-    adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-    if (!adev->uvd.saved_bo)
-        return -ENOMEM;
-    memcpy_fromio(adev->uvd.saved_bo, ptr, size);
+    for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+        if (adev->uvd.inst[j].vcpu_bo == NULL)
+            continue;
+        cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
+        /* only valid for physical mode */
+        if (adev->asic_type < CHIP_POLARIS10) {
+            for (i = 0; i < adev->uvd.max_handles; ++i)
+                if (atomic_read(&adev->uvd.inst[j].handles[i]))
+                    break;
+            if (i == adev->uvd.max_handles)
+                continue;
+        }
+        size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
+        ptr = adev->uvd.inst[j].cpu_addr;
+        adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
+        if (!adev->uvd.inst[j].saved_bo)
+            return -ENOMEM;
+        memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+    }
     return 0;
 }
@@ -335,59 +346,65 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 {
     unsigned size;
     void *ptr;
+    int i;
-    if (adev->uvd.vcpu_bo == NULL)
-        return -EINVAL;
-    size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-    ptr = adev->uvd.cpu_addr;
-    if (adev->uvd.saved_bo != NULL) {
-        memcpy_toio(ptr, adev->uvd.saved_bo, size);
-        kfree(adev->uvd.saved_bo);
-        adev->uvd.saved_bo = NULL;
-    } else {
-        const struct common_firmware_header *hdr;
-        unsigned offset;
-        hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
-        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
-            offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-            memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
-                    le32_to_cpu(hdr->ucode_size_bytes));
-            size -= le32_to_cpu(hdr->ucode_size_bytes);
-            ptr += le32_to_cpu(hdr->ucode_size_bytes);
-        }
-        memset_io(ptr, 0, size);
-        /* to restore uvd fence seq */
-        amdgpu_fence_driver_force_completion(&adev->uvd.ring);
-    }
+    for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+        if (adev->uvd.inst[i].vcpu_bo == NULL)
+            return -EINVAL;
+        size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
+        ptr = adev->uvd.inst[i].cpu_addr;
+        if (adev->uvd.inst[i].saved_bo != NULL) {
+            memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
+            kfree(adev->uvd.inst[i].saved_bo);
+            adev->uvd.inst[i].saved_bo = NULL;
+        } else {
+            const struct common_firmware_header *hdr;
+            unsigned offset;
+            hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+            if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+                offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+                memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
+                        le32_to_cpu(hdr->ucode_size_bytes));
+                size -= le32_to_cpu(hdr->ucode_size_bytes);
+                ptr += le32_to_cpu(hdr->ucode_size_bytes);
+            }
+            memset_io(ptr, 0, size);
+            /* to restore uvd fence seq */
+            amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
+        }
+    }
     return 0;
 }
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 {
-    struct amdgpu_ring *ring = &adev->uvd.ring;
-    int i, r;
-    for (i = 0; i < adev->uvd.max_handles; ++i) {
-        uint32_t handle = atomic_read(&adev->uvd.handles[i]);
-        if (handle != 0 && adev->uvd.filp[i] == filp) {
-            struct dma_fence *fence;
-            r = amdgpu_uvd_get_destroy_msg(ring, handle,
-                               false, &fence);
-            if (r) {
-                DRM_ERROR("Error destroying UVD (%d)!\n", r);
-                continue;
-            }
-            dma_fence_wait(fence, false);
-            dma_fence_put(fence);
-            adev->uvd.filp[i] = NULL;
-            atomic_set(&adev->uvd.handles[i], 0);
-        }
-    }
+    struct amdgpu_ring *ring;
+    int i, j, r;
+    for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+        ring = &adev->uvd.inst[j].ring;
+        for (i = 0; i < adev->uvd.max_handles; ++i) {
+            uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
+            if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
+                struct dma_fence *fence;
+                r = amdgpu_uvd_get_destroy_msg(ring, handle,
+                                   false, &fence);
+                if (r) {
+                    DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
+                    continue;
+                }
+                dma_fence_wait(fence, false);
+                dma_fence_put(fence);
+                adev->uvd.inst[j].filp[i] = NULL;
+                atomic_set(&adev->uvd.inst[j].handles[i], 0);
+            }
+        }
+    }
 }
@@ -662,15 +679,16 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
     void *ptr;
     long r;
     int i;
+    uint32_t ip_instance = ctx->parser->job->ring->me;
     if (offset & 0x3F) {
-        DRM_ERROR("UVD messages must be 64 byte aligned!\n");
+        DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
         return -EINVAL;
     }
     r = amdgpu_bo_kmap(bo, &ptr);
     if (r) {
-        DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
+        DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
         return r;
     }
@@ -680,7 +698,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
     handle = msg[2];
     if (handle == 0) {
-        DRM_ERROR("Invalid UVD handle!\n");
+        DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
         return -EINVAL;
     }
@@ -691,18 +709,18 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
         /* try to alloc a new handle */
         for (i = 0; i < adev->uvd.max_handles; ++i) {
-            if (atomic_read(&adev->uvd.handles[i]) == handle) {
-                DRM_ERROR("Handle 0x%x already in use!\n", handle);
+            if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
+                DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
                 return -EINVAL;
             }
-            if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
-                adev->uvd.filp[i] = ctx->parser->filp;
+            if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
+                adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
                 return 0;
             }
         }
-        DRM_ERROR("No more free UVD handles!\n");
+        DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
         return -ENOSPC;
     case 1:
@@ -714,27 +732,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
         /* validate the handle */
         for (i = 0; i < adev->uvd.max_handles; ++i) {
-            if (atomic_read(&adev->uvd.handles[i]) == handle) {
-                if (adev->uvd.filp[i] != ctx->parser->filp) {
-                    DRM_ERROR("UVD handle collision detected!\n");
+            if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
+                if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
+                    DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
                     return -EINVAL;
                 }
                 return 0;
             }
         }
-        DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+        DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
         return -ENOENT;
     case 2:
         /* it's a destroy msg, free the handle */
         for (i = 0; i < adev->uvd.max_handles; ++i)
-            atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
+            atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
         amdgpu_bo_kunmap(bo);
         return 0;
     default:
-        DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+        DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
         return -EINVAL;
     }
     BUG();
@@ -805,7 +823,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
     }
     if ((cmd == 0 || cmd == 0x3) &&
-        (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
+        (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
         DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
               start, end);
         return -EINVAL;
@@ -973,6 +991,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
     uint64_t addr;
     long r;
     int i;
+    unsigned offset_idx = 0;
+    unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
     amdgpu_bo_kunmap(bo);
     amdgpu_bo_unpin(bo);
@@ -992,17 +1012,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
         goto err;
     if (adev->asic_type >= CHIP_VEGA10) {
-        data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0_VEGA10, 0);
-        data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1_VEGA10, 0);
-        data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD_VEGA10, 0);
-        data[3] = PACKET0(mmUVD_NO_OP_VEGA10, 0);
-    } else {
-        data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
-        data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
-        data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
-        data[3] = PACKET0(mmUVD_NO_OP, 0);
+        offset_idx = 1 + ring->me;
+        offset[1] = adev->reg_offset[UVD_HWIP][0][1];
+        offset[2] = adev->reg_offset[UVD_HWIP][1][1];
     }
+    data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
+    data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
+    data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
+    data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
     ib = &job->ibs[0];
     addr = amdgpu_bo_gpu_offset(bo);
     ib->ptr[0] = data[0];
@@ -1038,7 +1057,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
     if (r)
         goto err_free;
-    r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
+    r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
                   AMDGPU_FENCE_OWNER_UNDEFINED, &f);
     if (r)
         goto err_free;
@@ -1126,8 +1145,15 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
     struct amdgpu_device *adev =
-        container_of(work, struct amdgpu_device, uvd.idle_work.work);
-    unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
+        container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+    unsigned fences = 0, i, j;
+    for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+        fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
+        for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
+            fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
+        }
+    }
     if (fences == 0) {
         if (adev->pm.dpm_enabled) {
@@ -1141,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
                                AMD_CG_STATE_GATE);
         }
     } else {
-        schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+        schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
     }
 }
@@ -1153,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
     if (amdgpu_sriov_vf(adev))
         return;
-    set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
+    set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
     if (set_clocks) {
         if (adev->pm.dpm_enabled) {
             amdgpu_dpm_enable_uvd(adev, true);
@@ -1170,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
     if (!amdgpu_sriov_vf(ring->adev))
-        schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+        schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
 }
 /**
@@ -1184,27 +1210,28 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
     struct dma_fence *fence;
     long r;
+    uint32_t ip_instance = ring->me;
     r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
     if (r) {
-        DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+        DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
         goto error;
     }
     r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
     if (r) {
-        DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+        DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
         goto error;
     }
     r = dma_fence_wait_timeout(fence, false, timeout);
     if (r == 0) {
-        DRM_ERROR("amdgpu: IB test timed out.\n");
+        DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
         r = -ETIMEDOUT;
     } else if (r < 0) {
-        DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+        DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
     } else {
-        DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+        DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
         r = 0;
     }
@@ -1232,7 +1259,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
          * necessarily linear. So we need to count
          * all non-zero handles.
          */
-        if (atomic_read(&adev->uvd.handles[i]))
+        if (atomic_read(&adev->uvd.inst->handles[i]))
             used_handles++;
     }
...
@@ -31,30 +31,37 @@
 #define AMDGPU_UVD_SESSION_SIZE		(50*1024)
 #define AMDGPU_UVD_FIRMWARE_OFFSET	256
+#define AMDGPU_MAX_UVD_INSTANCES	2
 #define AMDGPU_UVD_FIRMWARE_SIZE(adev) \
     (AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \
                    8) - AMDGPU_UVD_FIRMWARE_OFFSET)
-struct amdgpu_uvd {
+struct amdgpu_uvd_inst {
     struct amdgpu_bo	*vcpu_bo;
     void			*cpu_addr;
     uint64_t		gpu_addr;
-    unsigned		fw_version;
     void			*saved_bo;
-    unsigned		max_handles;
     atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
     struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
     struct delayed_work	idle_work;
-    const struct firmware	*fw;	/* UVD firmware */
     struct amdgpu_ring	ring;
     struct amdgpu_ring	ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
     struct amdgpu_irq_src	irq;
-    bool			address_64_bit;
-    bool			use_ctx_buf;
     struct drm_sched_entity entity;
     struct drm_sched_entity entity_enc;
     uint32_t		srbm_soft_reset;
+};
+struct amdgpu_uvd {
+    const struct firmware	*fw;	/* UVD firmware */
+    unsigned		fw_version;
+    unsigned		max_handles;
     unsigned		num_enc_rings;
+    uint8_t			num_uvd_inst;
+    bool			address_64_bit;
+    bool			use_ctx_buf;
+    struct amdgpu_uvd_inst	inst[AMDGPU_MAX_UVD_INSTANCES];
 };
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
...
@@ -57,6 +57,7 @@
 #define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
 #define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
+#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"
 #ifdef CONFIG_DRM_AMDGPU_CIK
 MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -76,6 +77,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM);
 MODULE_FIRMWARE(FIRMWARE_VEGA10);
 MODULE_FIRMWARE(FIRMWARE_VEGA12);
+MODULE_FIRMWARE(FIRMWARE_VEGA20);
 static void amdgpu_vce_idle_work_handler(struct work_struct *work);
@@ -143,6 +145,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
     case CHIP_VEGA12:
         fw_name = FIRMWARE_VEGA12;
         break;
+    case CHIP_VEGA20:
+        fw_name = FIRMWARE_VEGA20;
+        break;
     default:
         return -EINVAL;
...
@@ -205,13 +205,18 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
     struct amdgpu_device *adev =
         container_of(work, struct amdgpu_device, vcn.idle_work.work);
     unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
+    unsigned i;
+    for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+        fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+    }
     if (fences == 0) {
-        if (adev->pm.dpm_enabled) {
-            /* might be used when with pg/cg
+        if (adev->pm.dpm_enabled)
             amdgpu_dpm_enable_uvd(adev, false);
-            */
-        }
+        else
+            amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+                                   AMD_PG_STATE_GATE);
     } else {
         schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
     }
@@ -223,9 +228,11 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
     bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
     if (set_clocks && adev->pm.dpm_enabled) {
-        /* might be used when with pg/cg
+        if (adev->pm.dpm_enabled)
             amdgpu_dpm_enable_uvd(adev, true);
-        */
+        else
+            amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+                                   AMD_PG_STATE_UNGATE);
     }
 }
...
@@ -45,6 +45,17 @@
 #define VCN_ENC_CMD_REG_WRITE		0x0000000b
 #define VCN_ENC_CMD_REG_WAIT		0x0000000c
+enum engine_status_constants {
+    UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
+    UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002,
+    UVD_STATUS__UVD_BUSY = 0x00000004,
+    GB_ADDR_CONFIG_DEFAULT = 0x26010011,
+    UVD_STATUS__IDLE = 0x2,
+    UVD_STATUS__BUSY = 0x5,
+    UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF = 0x1,
+    UVD_STATUS__RBC_BUSY = 0x1,
+};
 struct amdgpu_vcn {
     struct amdgpu_bo	*vcpu_bo;
     void			*cpu_addr;
...
@@ -119,9 +119,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
      * is currently evicted. add the bo to the evicted list to make sure it
      * is validated on next vm use to avoid fault.
      * */
-    spin_lock(&vm->status_lock);
     list_move_tail(&base->vm_status, &vm->evicted);
-    spin_unlock(&vm->status_lock);
 }
 /**
@@ -226,24 +224,16 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                   void *param)
 {
     struct ttm_bo_global *glob = adev->mman.bdev.glob;
-    int r;
+    struct amdgpu_vm_bo_base *bo_base, *tmp;
+    int r = 0;
-    spin_lock(&vm->status_lock);
-    while (!list_empty(&vm->evicted)) {
-        struct amdgpu_vm_bo_base *bo_base;
-        struct amdgpu_bo *bo;
-        bo_base = list_first_entry(&vm->evicted,
-                       struct amdgpu_vm_bo_base,
-                       vm_status);
-        spin_unlock(&vm->status_lock);
-        bo = bo_base->bo;
-        BUG_ON(!bo);
+    list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
+        struct amdgpu_bo *bo = bo_base->bo;
         if (bo->parent) {
             r = validate(param, bo);
             if (r)
-                return r;
+                break;
             spin_lock(&glob->lru_lock);
             ttm_bo_move_to_lru_tail(&bo->tbo);
@@ -252,22 +242,29 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
             spin_unlock(&glob->lru_lock);
         }
-        if (bo->tbo.type == ttm_bo_type_kernel &&
-            vm->use_cpu_for_update) {
-            r = amdgpu_bo_kmap(bo, NULL);
-            if (r)
-                return r;
-        }
-        spin_lock(&vm->status_lock);
-        if (bo->tbo.type != ttm_bo_type_kernel)
+        if (bo->tbo.type != ttm_bo_type_kernel) {
+            spin_lock(&vm->moved_lock);
             list_move(&bo_base->vm_status, &vm->moved);
-        else
+            spin_unlock(&vm->moved_lock);
+        } else {
             list_move(&bo_base->vm_status, &vm->relocated);
+        }
     }
-    spin_unlock(&vm->status_lock);
-    return 0;
+    spin_lock(&glob->lru_lock);
+    list_for_each_entry(bo_base, &vm->idle, vm_status) {
+        struct amdgpu_bo *bo = bo_base->bo;
+        if (!bo->parent)
+            continue;
+        ttm_bo_move_to_lru_tail(&bo->tbo);
+        if (bo->shadow)
+            ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
+    }
+    spin_unlock(&glob->lru_lock);
+    return r;
 }
 /**
@@ -279,13 +276,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  */
 bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 {
-    bool ready;
-    spin_lock(&vm->status_lock);
-    ready = list_empty(&vm->evicted);
-    spin_unlock(&vm->status_lock);
-    return ready;
+    return list_empty(&vm->evicted);
 }
 /**
@@ -477,9 +468,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
             pt->parent = amdgpu_bo_ref(parent->base.bo);
             amdgpu_vm_bo_base_init(&entry->base, vm, pt);
-            spin_lock(&vm->status_lock);
             list_move(&entry->base.vm_status, &vm->relocated);
-            spin_unlock(&vm->status_lock);
         }
         if (level < AMDGPU_VM_PTB) {
@@ -926,10 +915,8 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
         if (!entry->base.bo)
             continue;
-        spin_lock(&vm->status_lock);
-        if (list_empty(&entry->base.vm_status))
-            list_add(&entry->base.vm_status, &vm->relocated);
-        spin_unlock(&vm->status_lock);
+        if (!entry->base.moved)
+            list_move(&entry->base.vm_status, &vm->relocated);
         amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
     }
 }
@@ -959,6 +946,14 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
     params.adev = adev;
     if (vm->use_cpu_for_update) {
+        struct amdgpu_vm_bo_base *bo_base;
+        list_for_each_entry(bo_base, &vm->relocated, vm_status) {
+            r = amdgpu_bo_kmap(bo_base->bo, NULL);
+            if (unlikely(r))
+                return r;
+        }
         r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
         if (unlikely(r))
             return r;
@@ -974,7 +969,6 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
         params.func = amdgpu_vm_do_set_ptes;
     }
-    spin_lock(&vm->status_lock);
     while (!list_empty(&vm->relocated)) {
         struct amdgpu_vm_bo_base *bo_base, *parent;
         struct amdgpu_vm_pt *pt, *entry;
@@ -983,14 +977,12 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
         bo_base = list_first_entry(&vm->relocated,
                        struct amdgpu_vm_bo_base,
                        vm_status);
-        list_del_init(&bo_base->vm_status);
-        spin_unlock(&vm->status_lock);
+        bo_base->moved = false;
+        list_move(&bo_base->vm_status, &vm->idle);
         bo = bo_base->bo->parent;
-        if (!bo) {
-            spin_lock(&vm->status_lock);
+        if (!bo)
             continue;
-        }
         parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
                       bo_list);
@@ -999,12 +991,10 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
         amdgpu_vm_update_pde(&params, vm, pt, entry);
-        spin_lock(&vm->status_lock);
         if (!vm->use_cpu_for_update &&
             (ndw - params.ib->length_dw) < 32)
             break;
     }
-    spin_unlock(&vm->status_lock);
     if (vm->use_cpu_for_update) {
         /* Flush HDP */
@@ -1107,9 +1097,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
         if (entry->huge) {
             /* Add the entry to the relocated list to update it. */
             entry->huge = false;
-            spin_lock(&p->vm->status_lock);
             list_move(&entry->base.vm_status, &p->vm->relocated);
-            spin_unlock(&p->vm->status_lock);
         }
         return;
     }
@@ -1588,18 +1576,22 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
         amdgpu_asic_flush_hdp(adev, NULL);
     }
-    spin_lock(&vm->status_lock);
+    spin_lock(&vm->moved_lock);
     list_del_init(&bo_va->base.vm_status);
+    spin_unlock(&vm->moved_lock);
     /* If the BO is not in its preferred location add it back to
      * the evicted list so that it gets validated again on the
      * next command submission.
      */
-    if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
-        !(bo->preferred_domains &
-        amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)))
-        list_add_tail(&bo_va->base.vm_status, &vm->evicted);
-    spin_unlock(&vm->status_lock);
+    if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+        uint32_t mem_type = bo->tbo.mem.mem_type;
+        if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+            list_add_tail(&bo_va->base.vm_status, &vm->evicted);
+        else
+            list_add(&bo_va->base.vm_status, &vm->idle);
+    }
     list_splice_init(&bo_va->invalids, &bo_va->valids);
     bo_va->cleared = clear;
@@ -1808,19 +1800,18 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
                struct amdgpu_vm *vm)
 {
+    struct amdgpu_bo_va *bo_va, *tmp;
+    struct list_head moved;
     bool clear;
-    int r = 0;
+    int r;
-    spin_lock(&vm->status_lock);
-    while (!list_empty(&vm->moved)) {
-        struct amdgpu_bo_va *bo_va;
-        struct reservation_object *resv;
-        bo_va = list_first_entry(&vm->moved,
-            struct amdgpu_bo_va, base.vm_status);
-        spin_unlock(&vm->status_lock);
-        resv = bo_va->base.bo->tbo.resv;
+    INIT_LIST_HEAD(&moved);
+    spin_lock(&vm->moved_lock);
+    list_splice_init(&vm->moved, &moved);
+    spin_unlock(&vm->moved_lock);
+    list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
+        struct reservation_object *resv = bo_va->base.bo->tbo.resv;
         /* Per VM BOs never need to bo cleared in the page tables */
         if (resv == vm->root.base.bo->tbo.resv)
@@ -1833,17 +1824,19 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
             clear = true;
         r = amdgpu_vm_bo_update(adev, bo_va, clear);
-        if (r)
+        if (r) {
+            spin_lock(&vm->moved_lock);
+            list_splice(&moved, &vm->moved);
+            spin_unlock(&vm->moved_lock);
             return r;
+        }
         if (!clear && resv != vm->root.base.bo->tbo.resv)
             reservation_object_unlock(resv);
-        spin_lock(&vm->status_lock);
     }
-    spin_unlock(&vm->status_lock);
-    return r;
+    return 0;
 }
 /**
@@ -1902,11 +1895,11 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
     if (mapping->flags & AMDGPU_PTE_PRT)
         amdgpu_vm_prt_get(adev);
-    if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
-        spin_lock(&vm->status_lock);
-        if (list_empty(&bo_va->base.vm_status))
-            list_add(&bo_va->base.vm_status, &vm->moved);
-        spin_unlock(&vm->status_lock);
+    if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
+        !bo_va->base.moved) {
+        spin_lock(&vm->moved_lock);
+        list_move(&bo_va->base.vm_status, &vm->moved);
+        spin_unlock(&vm->moved_lock);
     }
     trace_amdgpu_vm_bo_map(bo_va, mapping);
 }
...@@ -2216,9 +2209,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, ...@@ -2216,9 +2209,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_del(&bo_va->base.bo_list); list_del(&bo_va->base.bo_list);
spin_lock(&vm->status_lock); spin_lock(&vm->moved_lock);
list_del(&bo_va->base.vm_status); list_del(&bo_va->base.vm_status);
spin_unlock(&vm->status_lock); spin_unlock(&vm->moved_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list); list_del(&mapping->list);
...@@ -2258,31 +2251,28 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, ...@@ -2258,31 +2251,28 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
list_for_each_entry(bo_base, &bo->va, bo_list) { list_for_each_entry(bo_base, &bo->va, bo_list) {
struct amdgpu_vm *vm = bo_base->vm; struct amdgpu_vm *vm = bo_base->vm;
bool was_moved = bo_base->moved;
bo_base->moved = true; bo_base->moved = true;
if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) { if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
spin_lock(&bo_base->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel) if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&bo_base->vm_status, &vm->evicted); list_move(&bo_base->vm_status, &vm->evicted);
else else
list_move_tail(&bo_base->vm_status, list_move_tail(&bo_base->vm_status,
&vm->evicted); &vm->evicted);
spin_unlock(&bo_base->vm->status_lock);
continue; continue;
} }
if (bo->tbo.type == ttm_bo_type_kernel) { if (was_moved)
spin_lock(&bo_base->vm->status_lock);
if (list_empty(&bo_base->vm_status))
list_add(&bo_base->vm_status, &vm->relocated);
spin_unlock(&bo_base->vm->status_lock);
continue; continue;
}
spin_lock(&bo_base->vm->status_lock); if (bo->tbo.type == ttm_bo_type_kernel) {
if (list_empty(&bo_base->vm_status)) list_move(&bo_base->vm_status, &vm->relocated);
list_add(&bo_base->vm_status, &vm->moved); } else {
spin_unlock(&bo_base->vm->status_lock); spin_lock(&bo_base->vm->moved_lock);
list_move(&bo_base->vm_status, &vm->moved);
spin_unlock(&bo_base->vm->moved_lock);
}
} }
} }
...@@ -2391,10 +2381,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, ...@@ -2391,10 +2381,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->va = RB_ROOT_CACHED; vm->va = RB_ROOT_CACHED;
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL; vm->reserved_vmid[i] = NULL;
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->evicted); INIT_LIST_HEAD(&vm->evicted);
INIT_LIST_HEAD(&vm->relocated); INIT_LIST_HEAD(&vm->relocated);
spin_lock_init(&vm->moved_lock);
INIT_LIST_HEAD(&vm->moved); INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->idle);
INIT_LIST_HEAD(&vm->freed); INIT_LIST_HEAD(&vm->freed);
/* create scheduler entity for page table updates */ /* create scheduler entity for page table updates */
......
...@@ -168,9 +168,6 @@ struct amdgpu_vm { ...@@ -168,9 +168,6 @@ struct amdgpu_vm {
/* tree of virtual addresses mapped */ /* tree of virtual addresses mapped */
struct rb_root_cached va; struct rb_root_cached va;
/* protecting invalidated */
spinlock_t status_lock;
/* BOs who needs a validation */ /* BOs who needs a validation */
struct list_head evicted; struct list_head evicted;
...@@ -179,6 +176,10 @@ struct amdgpu_vm { ...@@ -179,6 +176,10 @@ struct amdgpu_vm {
/* BOs moved, but not yet updated in the PT */ /* BOs moved, but not yet updated in the PT */
struct list_head moved; struct list_head moved;
spinlock_t moved_lock;
/* All BOs of this VM not currently in the state machine */
struct list_head idle;
/* BO mappings freed, but not yet updated in the PT */ /* BO mappings freed, but not yet updated in the PT */
struct list_head freed; struct list_head freed;
...@@ -187,9 +188,6 @@ struct amdgpu_vm { ...@@ -187,9 +188,6 @@ struct amdgpu_vm {
struct amdgpu_vm_pt root; struct amdgpu_vm_pt root;
struct dma_fence *last_update; struct dma_fence *last_update;
/* protecting freed */
spinlock_t freed_lock;
/* Scheduler entity for page table updates */ /* Scheduler entity for page table updates */
struct drm_sched_entity entity; struct drm_sched_entity entity;
......
...@@ -473,6 +473,7 @@ static int dce_virtual_hw_init(void *handle) ...@@ -473,6 +473,7 @@ static int dce_virtual_hw_init(void *handle)
break; break;
case CHIP_VEGA10: case CHIP_VEGA10:
case CHIP_VEGA12: case CHIP_VEGA12:
case CHIP_VEGA20:
break; break;
default: default:
DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type);
......
...@@ -102,6 +102,13 @@ static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev, ...@@ -102,6 +102,13 @@ static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,
*flags |= AMD_CG_SUPPORT_DF_MGCG; *flags |= AMD_CG_SUPPORT_DF_MGCG;
} }
static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev,
bool enable)
{
WREG32_FIELD15(DF, 0, DF_CS_AON0_CoherentSlaveModeCtrlA0,
ForceParWrRMW, enable);
}
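/*
 * Editorial note, not part of the patch: within this series the new
 * enable_ecc_force_par_wr_rmw() callback is wired into gmc_v9_0_late_init(),
 * which clears ForceParWrRMW when ECC is reported absent (see the gmc_v9_0
 * hunk further below).
 */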
const struct amdgpu_df_funcs df_v1_7_funcs = { const struct amdgpu_df_funcs df_v1_7_funcs = {
.init = df_v1_7_init, .init = df_v1_7_init,
.enable_broadcast_mode = df_v1_7_enable_broadcast_mode, .enable_broadcast_mode = df_v1_7_enable_broadcast_mode,
...@@ -109,4 +116,5 @@ const struct amdgpu_df_funcs df_v1_7_funcs = { ...@@ -109,4 +116,5 @@ const struct amdgpu_df_funcs df_v1_7_funcs = {
.get_hbm_channel_number = df_v1_7_get_hbm_channel_number, .get_hbm_channel_number = df_v1_7_get_hbm_channel_number,
.update_medium_grain_clock_gating = df_v1_7_update_medium_grain_clock_gating, .update_medium_grain_clock_gating = df_v1_7_update_medium_grain_clock_gating,
.get_clockgating_state = df_v1_7_get_clockgating_state, .get_clockgating_state = df_v1_7_get_clockgating_state,
.enable_ecc_force_par_wr_rmw = df_v1_7_enable_ecc_force_par_wr_rmw,
}; };
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "df_v3_6.h"
#include "df/df_3_6_default.h"
#include "df/df_3_6_offset.h"
#include "df/df_3_6_sh_mask.h"
static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0,
16, 32, 0, 0, 0, 2, 4, 8};
static void df_v3_6_init(struct amdgpu_device *adev)
{
}
static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev,
bool enable)
{
u32 tmp;
if (enable) {
tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl);
tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK;
WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp);
} else
WREG32_SOC15(DF, 0, mmFabricConfigAccessControl,
mmFabricConfigAccessControl_DEFAULT);
}
static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev)
{
u32 tmp;
tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0);
tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK;
tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
return tmp;
}
static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
{
int fb_channel_number;
fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
	if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
fb_channel_number = 0;
return df_v3_6_channel_number[fb_channel_number];
}
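/*
 * Illustrative mapping, not part of the patch: an IntLvNumChan value of 7
 * read back from DramBaseAddress0 indexes df_v3_6_channel_number[7] and
 * reports 16 HBM channels; out-of-range field values fall back to index 0.
 */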
static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
u32 tmp;
	/* Put DF in broadcast mode */
adev->df_funcs->enable_broadcast_mode(adev, true);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY;
WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
} else {
tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK;
tmp |= DF_V3_6_MGCG_DISABLE;
WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
}
/* Exit broadcast mode */
adev->df_funcs->enable_broadcast_mode(adev, false);
}
static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev,
u32 *flags)
{
u32 tmp;
/* AMD_CG_SUPPORT_DF_MGCG */
tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
if (tmp & DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY)
*flags |= AMD_CG_SUPPORT_DF_MGCG;
}
const struct amdgpu_df_funcs df_v3_6_funcs = {
.init = df_v3_6_init,
.enable_broadcast_mode = df_v3_6_enable_broadcast_mode,
.get_fb_channel_number = df_v3_6_get_fb_channel_number,
.get_hbm_channel_number = df_v3_6_get_hbm_channel_number,
.update_medium_grain_clock_gating =
df_v3_6_update_medium_grain_clock_gating,
.get_clockgating_state = df_v3_6_get_clockgating_state,
};
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __DF_V3_6_H__
#define __DF_V3_6_H__
#include "soc15_common.h"
enum DF_V3_6_MGCG {
DF_V3_6_MGCG_DISABLE = 0,
DF_V3_6_MGCG_ENABLE_00_CYCLE_DELAY = 1,
DF_V3_6_MGCG_ENABLE_01_CYCLE_DELAY = 2,
DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY = 13,
DF_V3_6_MGCG_ENABLE_31_CYCLE_DELAY = 14,
DF_V3_6_MGCG_ENABLE_63_CYCLE_DELAY = 15
};
extern const struct amdgpu_df_funcs df_v3_6_funcs;
#endif
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include "amdgpu_gfx.h" #include "amdgpu_gfx.h"
#include "soc15.h" #include "soc15.h"
#include "soc15d.h" #include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "gc/gc_9_0_offset.h" #include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h" #include "gc/gc_9_0_sh_mask.h"
...@@ -63,6 +64,13 @@ MODULE_FIRMWARE("amdgpu/vega12_mec.bin"); ...@@ -63,6 +64,13 @@ MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin"); MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin"); MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");
MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_ce.bin"); MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin"); MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin"); MODULE_FIRMWARE("amdgpu/raven_me.bin");
...@@ -72,29 +80,22 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin"); ...@@ -72,29 +80,22 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_9_0[] = static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{ {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
}; };
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
...@@ -108,6 +109,20 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = ...@@ -108,6 +109,20 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800) SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
}; };
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};
static const struct soc15_reg_golden golden_settings_gc_9_1[] = static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{ {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
...@@ -241,6 +256,14 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) ...@@ -241,6 +256,14 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_2_1_vg12, golden_settings_gc_9_2_1_vg12,
ARRAY_SIZE(golden_settings_gc_9_2_1_vg12)); ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
break; break;
case CHIP_VEGA20:
soc15_program_register_sequence(adev,
golden_settings_gc_9_0,
ARRAY_SIZE(golden_settings_gc_9_0));
soc15_program_register_sequence(adev,
golden_settings_gc_9_0_vg20,
ARRAY_SIZE(golden_settings_gc_9_0_vg20));
break;
case CHIP_RAVEN: case CHIP_RAVEN:
soc15_program_register_sequence(adev, soc15_program_register_sequence(adev,
golden_settings_gc_9_1, golden_settings_gc_9_1,
...@@ -468,6 +491,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) ...@@ -468,6 +491,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
case CHIP_VEGA12: case CHIP_VEGA12:
chip_name = "vega12"; chip_name = "vega12";
break; break;
case CHIP_VEGA20:
chip_name = "vega20";
break;
case CHIP_RAVEN: case CHIP_RAVEN:
chip_name = "raven"; chip_name = "raven";
break; break;
...@@ -1088,9 +1114,10 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { ...@@ -1088,9 +1114,10 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
}; };
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{ {
u32 gb_addr_config; u32 gb_addr_config;
int err;
adev->gfx.funcs = &gfx_v9_0_gfx_funcs; adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
...@@ -1112,6 +1139,20 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) ...@@ -1112,6 +1139,20 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN; gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
DRM_INFO("fix gfx.config for vega12\n"); DRM_INFO("fix gfx.config for vega12\n");
break; break;
case CHIP_VEGA20:
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22014042;
/* check vbios table if gpu info is not available */
err = amdgpu_atomfirmware_get_gfx_info(adev);
if (err)
return err;
break;
case CHIP_RAVEN: case CHIP_RAVEN:
adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
...@@ -1161,6 +1202,8 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) ...@@ -1161,6 +1202,8 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.gb_addr_config, adev->gfx.config.gb_addr_config,
GB_ADDR_CONFIG, GB_ADDR_CONFIG,
PIPE_INTERLEAVE_SIZE)); PIPE_INTERLEAVE_SIZE));
return 0;
} }
static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev, static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
...@@ -1394,6 +1437,7 @@ static int gfx_v9_0_sw_init(void *handle) ...@@ -1394,6 +1437,7 @@ static int gfx_v9_0_sw_init(void *handle)
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: case CHIP_VEGA10:
case CHIP_VEGA12: case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN: case CHIP_RAVEN:
adev->gfx.mec.num_mec = 2; adev->gfx.mec.num_mec = 2;
break; break;
...@@ -1521,7 +1565,9 @@ static int gfx_v9_0_sw_init(void *handle) ...@@ -1521,7 +1565,9 @@ static int gfx_v9_0_sw_init(void *handle)
adev->gfx.ce_ram_size = 0x8000; adev->gfx.ce_ram_size = 0x8000;
gfx_v9_0_gpu_early_init(adev); r = gfx_v9_0_gpu_early_init(adev);
if (r)
return r;
r = gfx_v9_0_ngg_init(adev); r = gfx_v9_0_ngg_init(adev);
if (r) if (r)
...@@ -3688,6 +3734,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle, ...@@ -3688,6 +3734,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: case CHIP_VEGA10:
case CHIP_VEGA12: case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN: case CHIP_RAVEN:
gfx_v9_0_update_gfx_clock_gating(adev, gfx_v9_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false); state == AMD_CG_STATE_GATE ? true : false);
...@@ -4680,6 +4727,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) ...@@ -4680,6 +4727,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: case CHIP_VEGA10:
case CHIP_VEGA12: case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN: case CHIP_RAVEN:
adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs; adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
break; break;
......
...@@ -675,6 +675,7 @@ static int gmc_v9_0_late_init(void *handle) ...@@ -675,6 +675,7 @@ static int gmc_v9_0_late_init(void *handle)
DRM_INFO("ECC is active.\n"); DRM_INFO("ECC is active.\n");
} else if (r == 0) { } else if (r == 0) {
DRM_INFO("ECC is not present.\n"); DRM_INFO("ECC is not present.\n");
adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
} else { } else {
DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r); DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
return r; return r;
...@@ -693,10 +694,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, ...@@ -693,10 +694,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_device_vram_location(adev, &adev->gmc, base); amdgpu_device_vram_location(adev, &adev->gmc, base);
amdgpu_device_gart_location(adev, mc); amdgpu_device_gart_location(adev, mc);
/* base offset of vram pages */ /* base offset of vram pages */
if (adev->flags & AMD_IS_APU) adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
else
adev->vm_manager.vram_base_offset = 0;
} }
/** /**
...@@ -755,6 +753,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) ...@@ -755,6 +753,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: /* all engines support GPUVM */ case CHIP_VEGA10: /* all engines support GPUVM */
case CHIP_VEGA12: /* all engines support GPUVM */ case CHIP_VEGA12: /* all engines support GPUVM */
case CHIP_VEGA20:
default: default:
adev->gmc.gart_size = 512ULL << 20; adev->gmc.gart_size = 512ULL << 20;
break; break;
...@@ -860,6 +859,7 @@ static int gmc_v9_0_sw_init(void *handle) ...@@ -860,6 +859,7 @@ static int gmc_v9_0_sw_init(void *handle)
break; break;
case CHIP_VEGA10: case CHIP_VEGA10:
case CHIP_VEGA12: case CHIP_VEGA12:
case CHIP_VEGA20:
/* /*
* To fulfill 4-level page support, * To fulfill 4-level page support,
* vm size is 256TB (48bit), maximum size of Vega10, * vm size is 256TB (48bit), maximum size of Vega10,
...@@ -977,6 +977,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) ...@@ -977,6 +977,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: case CHIP_VEGA10:
case CHIP_VEGA20:
soc15_program_register_sequence(adev, soc15_program_register_sequence(adev,
golden_settings_mmhub_1_0_0, golden_settings_mmhub_1_0_0,
ARRAY_SIZE(golden_settings_mmhub_1_0_0)); ARRAY_SIZE(golden_settings_mmhub_1_0_0));
......
...@@ -734,6 +734,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, ...@@ -734,6 +734,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: case CHIP_VEGA10:
case CHIP_VEGA12: case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN: case CHIP_RAVEN:
mmhub_v1_0_update_medium_grain_clock_gating(adev, mmhub_v1_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false); state == AMD_CG_STATE_GATE ? true : false);
......
...@@ -34,10 +34,19 @@ ...@@ -34,10 +34,19 @@
#define smnCPM_CONTROL 0x11180460 #define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070 #define smnPCIE_CNTL2 0x11180070
/* vega20 */
#define mmRCC_DEV0_EPF0_STRAP0_VG20 0x0011
#define mmRCC_DEV0_EPF0_STRAP0_VG20_BASE_IDX 2
static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev) static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
{ {
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
if (adev->asic_type == CHIP_VEGA20)
tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_VG20);
else
tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
...@@ -75,10 +84,14 @@ static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instan ...@@ -75,10 +84,14 @@ static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instan
SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE); SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
u32 doorbell_range = RREG32(reg); u32 doorbell_range = RREG32(reg);
u32 range = 2;
if (adev->asic_type == CHIP_VEGA20)
range = 8;
if (use_doorbell) { if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index); doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2); doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, range);
} else } else
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0); doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
...@@ -133,6 +146,9 @@ static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *ade ...@@ -133,6 +146,9 @@ static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
{ {
uint32_t def, data; uint32_t def, data;
if (adev->asic_type == CHIP_VEGA20)
return;
/* NBIF_MGCG_CTRL_LCLK */ /* NBIF_MGCG_CTRL_LCLK */
def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK); def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
......
...@@ -41,6 +41,9 @@ MODULE_FIRMWARE("amdgpu/vega10_sos.bin"); ...@@ -41,6 +41,9 @@ MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
MODULE_FIRMWARE("amdgpu/vega10_asd.bin"); MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
MODULE_FIRMWARE("amdgpu/vega12_sos.bin"); MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
MODULE_FIRMWARE("amdgpu/vega12_asd.bin"); MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
#define smnMP1_FIRMWARE_FLAGS 0x3010028 #define smnMP1_FIRMWARE_FLAGS 0x3010028
......
...@@ -42,6 +42,8 @@ MODULE_FIRMWARE("amdgpu/vega10_sdma.bin"); ...@@ -42,6 +42,8 @@ MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin"); MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma.bin"); MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin"); MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega20_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin");
MODULE_FIRMWARE("amdgpu/raven_sdma.bin"); MODULE_FIRMWARE("amdgpu/raven_sdma.bin");
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
...@@ -107,6 +109,28 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] = ...@@ -107,6 +109,28 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0) SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
}; };
static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
{
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
};
static const struct soc15_reg_golden golden_settings_sdma_rv1[] = static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
{ {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
...@@ -139,6 +163,11 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) ...@@ -139,6 +163,11 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_vg12, golden_settings_sdma_vg12,
ARRAY_SIZE(golden_settings_sdma_vg12)); ARRAY_SIZE(golden_settings_sdma_vg12));
break; break;
case CHIP_VEGA20:
soc15_program_register_sequence(adev,
golden_settings_sdma_4_2,
ARRAY_SIZE(golden_settings_sdma_4_2));
break;
case CHIP_RAVEN: case CHIP_RAVEN:
soc15_program_register_sequence(adev, soc15_program_register_sequence(adev,
golden_settings_sdma_4_1, golden_settings_sdma_4_1,
...@@ -182,6 +211,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) ...@@ -182,6 +211,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
case CHIP_VEGA12: case CHIP_VEGA12:
chip_name = "vega12"; chip_name = "vega12";
break; break;
case CHIP_VEGA20:
chip_name = "vega20";
break;
case CHIP_RAVEN: case CHIP_RAVEN:
chip_name = "raven"; chip_name = "raven";
break; break;
...@@ -1516,6 +1548,7 @@ static int sdma_v4_0_set_clockgating_state(void *handle, ...@@ -1516,6 +1548,7 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: case CHIP_VEGA10:
case CHIP_VEGA12: case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN: case CHIP_RAVEN:
sdma_v4_0_update_medium_grain_clock_gating(adev, sdma_v4_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false); state == AMD_CG_STATE_GATE ? true : false);
......
...@@ -41,8 +41,6 @@ ...@@ -41,8 +41,6 @@
#include "sdma1/sdma1_4_0_offset.h" #include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h" #include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h" #include "hdp/hdp_4_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "mp/mp_9_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h" #include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h" #include "smuio/smuio_9_0_sh_mask.h"
...@@ -53,6 +51,7 @@ ...@@ -53,6 +51,7 @@
#include "gfxhub_v1_0.h" #include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h" #include "mmhub_v1_0.h"
#include "df_v1_7.h" #include "df_v1_7.h"
#include "df_v3_6.h"
#include "vega10_ih.h" #include "vega10_ih.h"
#include "sdma_v4_0.h" #include "sdma_v4_0.h"
#include "uvd_v7_0.h" #include "uvd_v7_0.h"
...@@ -489,16 +488,24 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) ...@@ -489,16 +488,24 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_RAVEN: case CHIP_RAVEN:
vega10_reg_base_init(adev); vega10_reg_base_init(adev);
break; break;
case CHIP_VEGA20:
vega20_reg_base_init(adev);
break;
default: default:
return -EINVAL; return -EINVAL;
} }
if (adev->flags & AMD_IS_APU) if (adev->flags & AMD_IS_APU)
adev->nbio_funcs = &nbio_v7_0_funcs; adev->nbio_funcs = &nbio_v7_0_funcs;
else if (adev->asic_type == CHIP_VEGA20)
adev->nbio_funcs = &nbio_v7_0_funcs;
else else
adev->nbio_funcs = &nbio_v6_1_funcs; adev->nbio_funcs = &nbio_v6_1_funcs;
adev->df_funcs = &df_v1_7_funcs; if (adev->asic_type == CHIP_VEGA20)
adev->df_funcs = &df_v3_6_funcs;
else
adev->df_funcs = &df_v1_7_funcs;
adev->nbio_funcs->detect_hw_virt(adev); adev->nbio_funcs->detect_hw_virt(adev);
if (amdgpu_sriov_vf(adev)) if (amdgpu_sriov_vf(adev))
...@@ -507,12 +514,15 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) ...@@ -507,12 +514,15 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: case CHIP_VEGA10:
case CHIP_VEGA12: case CHIP_VEGA12:
case CHIP_VEGA20:
amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); if (adev->asic_type != CHIP_VEGA20) {
if (!amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
}
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC) #if defined(CONFIG_DRM_AMD_DC)
...@@ -660,6 +670,27 @@ static int soc15_common_early_init(void *handle) ...@@ -660,6 +670,27 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = 0; adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14; adev->external_rev_id = adev->rev_id + 0x14;
break; break;
case CHIP_VEGA20:
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_ROM_MGCG |
AMD_CG_SUPPORT_VCE_MGCG |
AMD_CG_SUPPORT_UVD_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x28;
break;
case CHIP_RAVEN: case CHIP_RAVEN:
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_MGLS |
...@@ -679,8 +710,10 @@ static int soc15_common_early_init(void *handle) ...@@ -679,8 +710,10 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_MC_MGCG | AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS; AMD_CG_SUPPORT_SDMA_LS |
adev->pg_flags = AMD_PG_SUPPORT_SDMA; AMD_CG_SUPPORT_VCN_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
...@@ -872,6 +905,7 @@ static int soc15_common_set_clockgating_state(void *handle, ...@@ -872,6 +905,7 @@ static int soc15_common_set_clockgating_state(void *handle,
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: case CHIP_VEGA10:
case CHIP_VEGA12: case CHIP_VEGA12:
case CHIP_VEGA20:
adev->nbio_funcs->update_medium_grain_clock_gating(adev, adev->nbio_funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false); state == AMD_CG_STATE_GATE ? true : false);
adev->nbio_funcs->update_medium_grain_light_sleep(adev, adev->nbio_funcs->update_medium_grain_light_sleep(adev,
......
...@@ -55,5 +55,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev, ...@@ -55,5 +55,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
const u32 array_size); const u32 array_size);
int vega10_reg_base_init(struct amdgpu_device *adev); int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
#endif #endif
...@@ -47,6 +47,21 @@ ...@@ -47,6 +47,21 @@
#define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \ #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value) WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value)
#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \
do { \
uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
uint32_t loop = adev->usec_timeout; \
while ((tmp_ & (mask)) != (expected_value)) { \
udelay(2); \
tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
loop--; \
if (!loop) { \
ret = -ETIMEDOUT; \
break; \
} \
} \
} while (0)
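/*
 * Hedged usage sketch, not part of the patch: how a caller might use the new
 * SOC15_WAIT_ON_RREG() helper to poll a block's status register until the
 * masked value matches or adev->usec_timeout expires.  The register name and
 * the expected/mask values below are illustrative placeholders only.
 */
int ret = 0;

SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x3, ret);
if (ret == -ETIMEDOUT)
	DRM_ERROR("timed out waiting on UVD_STATUS\n");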
#endif #endif
...@@ -93,6 +93,7 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring) ...@@ -93,6 +93,7 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring)
static int uvd_v4_2_early_init(void *handle) static int uvd_v4_2_early_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->uvd.num_uvd_inst = 1;
uvd_v4_2_set_ring_funcs(adev); uvd_v4_2_set_ring_funcs(adev);
uvd_v4_2_set_irq_funcs(adev); uvd_v4_2_set_irq_funcs(adev);
...@@ -107,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle) ...@@ -107,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle)
int r; int r;
/* UVD TRAP */ /* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
if (r) if (r)
return r; return r;
...@@ -119,9 +120,9 @@ static int uvd_v4_2_sw_init(void *handle) ...@@ -119,9 +120,9 @@ static int uvd_v4_2_sw_init(void *handle)
if (r) if (r)
return r; return r;
ring = &adev->uvd.ring; ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd"); sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
return r; return r;
} }
...@@ -150,7 +151,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, ...@@ -150,7 +151,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
static int uvd_v4_2_hw_init(void *handle) static int uvd_v4_2_hw_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp; uint32_t tmp;
int r; int r;
...@@ -208,7 +209,7 @@ static int uvd_v4_2_hw_init(void *handle) ...@@ -208,7 +209,7 @@ static int uvd_v4_2_hw_init(void *handle)
static int uvd_v4_2_hw_fini(void *handle) static int uvd_v4_2_hw_fini(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0) if (RREG32(mmUVD_STATUS) != 0)
uvd_v4_2_stop(adev); uvd_v4_2_stop(adev);
...@@ -251,7 +252,7 @@ static int uvd_v4_2_resume(void *handle) ...@@ -251,7 +252,7 @@ static int uvd_v4_2_resume(void *handle)
*/ */
static int uvd_v4_2_start(struct amdgpu_device *adev) static int uvd_v4_2_start(struct amdgpu_device *adev)
{ {
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t rb_bufsz; uint32_t rb_bufsz;
int i, j, r; int i, j, r;
u32 tmp; u32 tmp;
...@@ -523,6 +524,18 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, ...@@ -523,6 +524,18 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw); amdgpu_ring_write(ring, ib->length_dw);
} }
static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
amdgpu_ring_write(ring, 0);
}
}
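/*
 * Editorial note, not part of the patch: PACKET0(mmUVD_NO_OP, 0) is a
 * one-register write packet, so each no-op occupies two ring dwords (header
 * plus the 0 payload).  That is why the new insert_nop hooks warn on an odd
 * count or write pointer and emit count / 2 packet pairs.
 */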
/** /**
* uvd_v4_2_mc_resume - memory controller programming * uvd_v4_2_mc_resume - memory controller programming
* *
...@@ -536,7 +549,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) ...@@ -536,7 +549,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
uint32_t size; uint32_t size;
/* program the VCPU memory controller bits 0-27 */ /* program the VCPU memory controller bits 0-27 */
addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3; size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr); WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
WREG32(mmUVD_VCPU_CACHE_SIZE0, size); WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
...@@ -553,11 +566,11 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) ...@@ -553,11 +566,11 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
WREG32(mmUVD_VCPU_CACHE_SIZE2, size); WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
/* bits 28-31 */ /* bits 28-31 */
addr = (adev->uvd.gpu_addr >> 28) & 0xF; addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
/* bits 32-39 */ /* bits 32-39 */
addr = (adev->uvd.gpu_addr >> 32) & 0xFF; addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
...@@ -664,7 +677,7 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, ...@@ -664,7 +677,7 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry) struct amdgpu_iv_entry *entry)
{ {
DRM_DEBUG("IH: UVD TRAP\n"); DRM_DEBUG("IH: UVD TRAP\n");
amdgpu_fence_process(&adev->uvd.ring); amdgpu_fence_process(&adev->uvd.inst->ring);
return 0; return 0;
} }
...@@ -732,7 +745,6 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = { ...@@ -732,7 +745,6 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.type = AMDGPU_RING_TYPE_UVD, .type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf, .align_mask = 0xf,
.nop = PACKET0(mmUVD_NO_OP, 0),
.support_64bit_ptrs = false, .support_64bit_ptrs = false,
.get_rptr = uvd_v4_2_ring_get_rptr, .get_rptr = uvd_v4_2_ring_get_rptr,
.get_wptr = uvd_v4_2_ring_get_wptr, .get_wptr = uvd_v4_2_ring_get_wptr,
...@@ -745,7 +757,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { ...@@ -745,7 +757,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
.emit_fence = uvd_v4_2_ring_emit_fence, .emit_fence = uvd_v4_2_ring_emit_fence,
.test_ring = uvd_v4_2_ring_test_ring, .test_ring = uvd_v4_2_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib, .test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop, .insert_nop = uvd_v4_2_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib, .pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use, .begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use, .end_use = amdgpu_uvd_ring_end_use,
...@@ -753,7 +765,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { ...@@ -753,7 +765,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
{ {
adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs; adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs;
} }
static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
...@@ -763,8 +775,8 @@ static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { ...@@ -763,8 +775,8 @@ static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = {
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev) static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev)
{ {
adev->uvd.irq.num_types = 1; adev->uvd.inst->irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs; adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs;
} }
const struct amdgpu_ip_block_version uvd_v4_2_ip_block = const struct amdgpu_ip_block_version uvd_v4_2_ip_block =
......
...@@ -89,6 +89,7 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring) ...@@ -89,6 +89,7 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
static int uvd_v5_0_early_init(void *handle) static int uvd_v5_0_early_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->uvd.num_uvd_inst = 1;
uvd_v5_0_set_ring_funcs(adev); uvd_v5_0_set_ring_funcs(adev);
uvd_v5_0_set_irq_funcs(adev); uvd_v5_0_set_irq_funcs(adev);
...@@ -103,7 +104,7 @@ static int uvd_v5_0_sw_init(void *handle) ...@@ -103,7 +104,7 @@ static int uvd_v5_0_sw_init(void *handle)
int r; int r;
/* UVD TRAP */ /* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
if (r) if (r)
return r; return r;
...@@ -115,9 +116,9 @@ static int uvd_v5_0_sw_init(void *handle) ...@@ -115,9 +116,9 @@ static int uvd_v5_0_sw_init(void *handle)
if (r) if (r)
return r; return r;
ring = &adev->uvd.ring; ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd"); sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
return r; return r;
} }
...@@ -144,7 +145,7 @@ static int uvd_v5_0_sw_fini(void *handle) ...@@ -144,7 +145,7 @@ static int uvd_v5_0_sw_fini(void *handle)
static int uvd_v5_0_hw_init(void *handle) static int uvd_v5_0_hw_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp; uint32_t tmp;
int r; int r;
...@@ -204,7 +205,7 @@ static int uvd_v5_0_hw_init(void *handle) ...@@ -204,7 +205,7 @@ static int uvd_v5_0_hw_init(void *handle)
static int uvd_v5_0_hw_fini(void *handle) static int uvd_v5_0_hw_fini(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0) if (RREG32(mmUVD_STATUS) != 0)
uvd_v5_0_stop(adev); uvd_v5_0_stop(adev);
...@@ -253,9 +254,9 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) ...@@ -253,9 +254,9 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
/* program memory controller bits 0-27 */ /* program memory controller bits 0-27 */
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.gpu_addr)); lower_32_bits(adev->uvd.inst->gpu_addr));
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->uvd.gpu_addr)); upper_32_bits(adev->uvd.inst->gpu_addr));
offset = AMDGPU_UVD_FIRMWARE_OFFSET; offset = AMDGPU_UVD_FIRMWARE_OFFSET;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev); size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
...@@ -287,7 +288,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) ...@@ -287,7 +288,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
*/ */
static int uvd_v5_0_start(struct amdgpu_device *adev) static int uvd_v5_0_start(struct amdgpu_device *adev)
{ {
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t rb_bufsz, tmp; uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl; uint32_t lmi_swap_cntl;
uint32_t mp_swap_cntl; uint32_t mp_swap_cntl;
...@@ -540,6 +541,18 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, ...@@ -540,6 +541,18 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw); amdgpu_ring_write(ring, ib->length_dw);
} }
static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
amdgpu_ring_write(ring, 0);
}
}
static bool uvd_v5_0_is_idle(void *handle) static bool uvd_v5_0_is_idle(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
...@@ -586,7 +599,7 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, ...@@ -586,7 +599,7 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry) struct amdgpu_iv_entry *entry)
{ {
DRM_DEBUG("IH: UVD TRAP\n"); DRM_DEBUG("IH: UVD TRAP\n");
amdgpu_fence_process(&adev->uvd.ring); amdgpu_fence_process(&adev->uvd.inst->ring);
return 0; return 0;
} }
...@@ -840,7 +853,6 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { ...@@ -840,7 +853,6 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.type = AMDGPU_RING_TYPE_UVD, .type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf, .align_mask = 0xf,
.nop = PACKET0(mmUVD_NO_OP, 0),
.support_64bit_ptrs = false, .support_64bit_ptrs = false,
.get_rptr = uvd_v5_0_ring_get_rptr, .get_rptr = uvd_v5_0_ring_get_rptr,
.get_wptr = uvd_v5_0_ring_get_wptr, .get_wptr = uvd_v5_0_ring_get_wptr,
...@@ -853,7 +865,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { ...@@ -853,7 +865,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
.emit_fence = uvd_v5_0_ring_emit_fence, .emit_fence = uvd_v5_0_ring_emit_fence,
.test_ring = uvd_v5_0_ring_test_ring, .test_ring = uvd_v5_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib, .test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop, .insert_nop = uvd_v5_0_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib, .pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use, .begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use, .end_use = amdgpu_uvd_ring_end_use,
...@@ -861,7 +873,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { ...@@ -861,7 +873,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{ {
adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs; adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs;
} }
static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
...@@ -871,8 +883,8 @@ static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { ...@@ -871,8 +883,8 @@ static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev) static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{ {
adev->uvd.irq.num_types = 1; adev->uvd.inst->irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs; adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs;
} }
const struct amdgpu_ip_block_version uvd_v5_0_ip_block = const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
......
...@@ -91,7 +91,7 @@ static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring) ...@@ -91,7 +91,7 @@ static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
if (ring == &adev->uvd.ring_enc[0]) if (ring == &adev->uvd.inst->ring_enc[0])
return RREG32(mmUVD_RB_RPTR); return RREG32(mmUVD_RB_RPTR);
else else
return RREG32(mmUVD_RB_RPTR2); return RREG32(mmUVD_RB_RPTR2);
...@@ -121,7 +121,7 @@ static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring) ...@@ -121,7 +121,7 @@ static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
if (ring == &adev->uvd.ring_enc[0]) if (ring == &adev->uvd.inst->ring_enc[0])
return RREG32(mmUVD_RB_WPTR); return RREG32(mmUVD_RB_WPTR);
else else
return RREG32(mmUVD_RB_WPTR2); return RREG32(mmUVD_RB_WPTR2);
...@@ -152,7 +152,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring) ...@@ -152,7 +152,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
if (ring == &adev->uvd.ring_enc[0]) if (ring == &adev->uvd.inst->ring_enc[0])
WREG32(mmUVD_RB_WPTR, WREG32(mmUVD_RB_WPTR,
lower_32_bits(ring->wptr)); lower_32_bits(ring->wptr));
else else
...@@ -375,6 +375,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) ...@@ -375,6 +375,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
static int uvd_v6_0_early_init(void *handle) static int uvd_v6_0_early_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->uvd.num_uvd_inst = 1;
if (!(adev->flags & AMD_IS_APU) && if (!(adev->flags & AMD_IS_APU) &&
(RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK)) (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
...@@ -399,14 +400,14 @@ static int uvd_v6_0_sw_init(void *handle) ...@@ -399,14 +400,14 @@ static int uvd_v6_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* UVD TRAP */ /* UVD TRAP */
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
if (r) if (r)
return r; return r;
/* UVD ENC TRAP */ /* UVD ENC TRAP */
if (uvd_v6_0_enc_support(adev)) { if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq); r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq);
if (r) if (r)
return r; return r;
} }
...@@ -418,17 +419,17 @@ static int uvd_v6_0_sw_init(void *handle) ...@@ -418,17 +419,17 @@ static int uvd_v6_0_sw_init(void *handle)
if (!uvd_v6_0_enc_support(adev)) { if (!uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) for (i = 0; i < adev->uvd.num_enc_rings; ++i)
adev->uvd.ring_enc[i].funcs = NULL; adev->uvd.inst->ring_enc[i].funcs = NULL;
adev->uvd.irq.num_types = 1; adev->uvd.inst->irq.num_types = 1;
adev->uvd.num_enc_rings = 0; adev->uvd.num_enc_rings = 0;
DRM_INFO("UVD ENC is disabled\n"); DRM_INFO("UVD ENC is disabled\n");
} else { } else {
struct drm_sched_rq *rq; struct drm_sched_rq *rq;
ring = &adev->uvd.ring_enc[0]; ring = &adev->uvd.inst->ring_enc[0];
rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
rq, NULL); rq, NULL);
if (r) { if (r) {
DRM_ERROR("Failed setting up UVD ENC run queue.\n"); DRM_ERROR("Failed setting up UVD ENC run queue.\n");
...@@ -440,17 +441,17 @@ static int uvd_v6_0_sw_init(void *handle) ...@@ -440,17 +441,17 @@ static int uvd_v6_0_sw_init(void *handle)
if (r) if (r)
return r; return r;
ring = &adev->uvd.ring; ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd"); sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r) if (r)
return r; return r;
if (uvd_v6_0_enc_support(adev)) { if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i]; ring = &adev->uvd.inst->ring_enc[i];
sprintf(ring->name, "uvd_enc%d", i); sprintf(ring->name, "uvd_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r) if (r)
return r; return r;
} }
...@@ -469,10 +470,10 @@ static int uvd_v6_0_sw_fini(void *handle) ...@@ -469,10 +470,10 @@ static int uvd_v6_0_sw_fini(void *handle)
return r; return r;
if (uvd_v6_0_enc_support(adev)) { if (uvd_v6_0_enc_support(adev)) {
drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
for (i = 0; i < adev->uvd.num_enc_rings; ++i) for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]); amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
} }
return amdgpu_uvd_sw_fini(adev); return amdgpu_uvd_sw_fini(adev);
...@@ -488,7 +489,7 @@ static int uvd_v6_0_sw_fini(void *handle) ...@@ -488,7 +489,7 @@ static int uvd_v6_0_sw_fini(void *handle)
static int uvd_v6_0_hw_init(void *handle) static int uvd_v6_0_hw_init(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t tmp; uint32_t tmp;
int i, r; int i, r;
...@@ -532,7 +533,7 @@ static int uvd_v6_0_hw_init(void *handle) ...@@ -532,7 +533,7 @@ static int uvd_v6_0_hw_init(void *handle)
if (uvd_v6_0_enc_support(adev)) { if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.ring_enc[i]; ring = &adev->uvd.inst->ring_enc[i];
ring->ready = true; ring->ready = true;
r = amdgpu_ring_test_ring(ring); r = amdgpu_ring_test_ring(ring);
if (r) { if (r) {
...@@ -563,7 +564,7 @@ static int uvd_v6_0_hw_init(void *handle) ...@@ -563,7 +564,7 @@ static int uvd_v6_0_hw_init(void *handle)
static int uvd_v6_0_hw_fini(void *handle) static int uvd_v6_0_hw_fini(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0) if (RREG32(mmUVD_STATUS) != 0)
uvd_v6_0_stop(adev); uvd_v6_0_stop(adev);
...@@ -611,9 +612,9 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) ...@@ -611,9 +612,9 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
/* programm memory controller bits 0-27 */ /* programm memory controller bits 0-27 */
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->uvd.gpu_addr)); lower_32_bits(adev->uvd.inst->gpu_addr));
WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->uvd.gpu_addr)); upper_32_bits(adev->uvd.inst->gpu_addr));
offset = AMDGPU_UVD_FIRMWARE_OFFSET; offset = AMDGPU_UVD_FIRMWARE_OFFSET;
size = AMDGPU_UVD_FIRMWARE_SIZE(adev); size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
...@@ -726,7 +727,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev, ...@@ -726,7 +727,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
*/ */
static int uvd_v6_0_start(struct amdgpu_device *adev) static int uvd_v6_0_start(struct amdgpu_device *adev)
{ {
struct amdgpu_ring *ring = &adev->uvd.ring; struct amdgpu_ring *ring = &adev->uvd.inst->ring;
uint32_t rb_bufsz, tmp; uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl; uint32_t lmi_swap_cntl;
uint32_t mp_swap_cntl; uint32_t mp_swap_cntl;
...@@ -866,14 +867,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) ...@@ -866,14 +867,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0); WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);
if (uvd_v6_0_enc_support(adev)) { if (uvd_v6_0_enc_support(adev)) {
ring = &adev->uvd.ring_enc[0]; ring = &adev->uvd.inst->ring_enc[0];
WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32(mmUVD_RB_SIZE, ring->ring_size / 4); WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);
ring = &adev->uvd.ring_enc[1]; ring = &adev->uvd.inst->ring_enc[1];
WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr); WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
...@@ -1099,6 +1100,18 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) ...@@ -1099,6 +1100,18 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xE); amdgpu_ring_write(ring, 0xE);
} }
static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
int i;
WARN_ON(ring->wptr % 2 || count % 2);
for (i = 0; i < count / 2; i++) {
amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
amdgpu_ring_write(ring, 0);
}
}
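This helper exists because the funcs table further down drops its single .nop dword (the PACKET0(mmUVD_NO_OP, 0) entry removed from uvd_v6_0_ring_phys_funcs) and instead emits a full register-write pair per NOP: PACKET0(mmUVD_NO_OP, 0) followed by a zero data dword. Each NOP therefore costs two dwords, which is why both count and the write pointer must stay even (the WARN_ON above). For contrast, a sketch of the generic fallback it replaces; this is an approximate reconstruction of amdgpu_ring_insert_nop, not code copied from this patch:

	/* Approximate shape of the generic helper: one dword per NOP,
	 * taken from ring->funcs->nop, with no alignment constraint. */
	static void generic_insert_nop_sketch(struct amdgpu_ring *ring, uint32_t count)
	{
		int i;

		for (i = 0; i < count; i++)
			amdgpu_ring_write(ring, ring->funcs->nop);
	}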
static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring) static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{ {
uint32_t seq = ring->fence_drv.sync_seq; uint32_t seq = ring->fence_drv.sync_seq;
...@@ -1158,10 +1171,10 @@ static bool uvd_v6_0_check_soft_reset(void *handle) ...@@ -1158,10 +1171,10 @@ static bool uvd_v6_0_check_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
if (srbm_soft_reset) { if (srbm_soft_reset) {
adev->uvd.srbm_soft_reset = srbm_soft_reset; adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
return true; return true;
} else { } else {
adev->uvd.srbm_soft_reset = 0; adev->uvd.inst->srbm_soft_reset = 0;
return false; return false;
} }
} }
...@@ -1170,7 +1183,7 @@ static int uvd_v6_0_pre_soft_reset(void *handle) ...@@ -1170,7 +1183,7 @@ static int uvd_v6_0_pre_soft_reset(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->uvd.srbm_soft_reset) if (!adev->uvd.inst->srbm_soft_reset)
return 0; return 0;
uvd_v6_0_stop(adev); uvd_v6_0_stop(adev);
...@@ -1182,9 +1195,9 @@ static int uvd_v6_0_soft_reset(void *handle) ...@@ -1182,9 +1195,9 @@ static int uvd_v6_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset; u32 srbm_soft_reset;
if (!adev->uvd.srbm_soft_reset) if (!adev->uvd.inst->srbm_soft_reset)
return 0; return 0;
srbm_soft_reset = adev->uvd.srbm_soft_reset; srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;
if (srbm_soft_reset) { if (srbm_soft_reset) {
u32 tmp; u32 tmp;
...@@ -1212,7 +1225,7 @@ static int uvd_v6_0_post_soft_reset(void *handle) ...@@ -1212,7 +1225,7 @@ static int uvd_v6_0_post_soft_reset(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (!adev->uvd.srbm_soft_reset) if (!adev->uvd.inst->srbm_soft_reset)
return 0; return 0;
mdelay(5); mdelay(5);
...@@ -1238,17 +1251,17 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, ...@@ -1238,17 +1251,17 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
switch (entry->src_id) { switch (entry->src_id) {
case 124: case 124:
amdgpu_fence_process(&adev->uvd.ring); amdgpu_fence_process(&adev->uvd.inst->ring);
break; break;
case 119: case 119:
if (likely(uvd_v6_0_enc_support(adev))) if (likely(uvd_v6_0_enc_support(adev)))
amdgpu_fence_process(&adev->uvd.ring_enc[0]); amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
else else
int_handled = false; int_handled = false;
break; break;
case 120: case 120:
if (likely(uvd_v6_0_enc_support(adev))) if (likely(uvd_v6_0_enc_support(adev)))
amdgpu_fence_process(&adev->uvd.ring_enc[1]); amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
else else
int_handled = false; int_handled = false;
break; break;
...@@ -1531,7 +1544,6 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { ...@@ -1531,7 +1544,6 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.type = AMDGPU_RING_TYPE_UVD, .type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf, .align_mask = 0xf,
.nop = PACKET0(mmUVD_NO_OP, 0),
.support_64bit_ptrs = false, .support_64bit_ptrs = false,
.get_rptr = uvd_v6_0_ring_get_rptr, .get_rptr = uvd_v6_0_ring_get_rptr,
.get_wptr = uvd_v6_0_ring_get_wptr, .get_wptr = uvd_v6_0_ring_get_wptr,
...@@ -1547,7 +1559,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { ...@@ -1547,7 +1559,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
.test_ring = uvd_v6_0_ring_test_ring, .test_ring = uvd_v6_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib, .test_ib = amdgpu_uvd_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop, .insert_nop = uvd_v6_0_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib, .pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use, .begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use, .end_use = amdgpu_uvd_ring_end_use,
...@@ -1612,10 +1624,10 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = { ...@@ -1612,10 +1624,10 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{ {
if (adev->asic_type >= CHIP_POLARIS10) { if (adev->asic_type >= CHIP_POLARIS10) {
adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs; adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
DRM_INFO("UVD is enabled in VM mode\n"); DRM_INFO("UVD is enabled in VM mode\n");
} else { } else {
adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs; adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
DRM_INFO("UVD is enabled in physical mode\n"); DRM_INFO("UVD is enabled in physical mode\n");
} }
} }
...@@ -1625,7 +1637,7 @@ static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev) ...@@ -1625,7 +1637,7 @@ static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
int i; int i;
for (i = 0; i < adev->uvd.num_enc_rings; ++i) for (i = 0; i < adev->uvd.num_enc_rings; ++i)
adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs; adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;
DRM_INFO("UVD ENC is enabled in VM mode\n"); DRM_INFO("UVD ENC is enabled in VM mode\n");
} }
...@@ -1638,11 +1650,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = { ...@@ -1638,11 +1650,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev) static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{ {
if (uvd_v6_0_enc_support(adev)) if (uvd_v6_0_enc_support(adev))
adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1; adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
else else
adev->uvd.irq.num_types = 1; adev->uvd.inst->irq.num_types = 1;
adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs; adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
} }
const struct amdgpu_ip_block_version uvd_v6_0_ip_block = const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
......
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu.h"
#include "soc15.h"
#include "soc15_common.h"
#include "soc15_hw_ip.h"
#include "vega20_ip_offset.h"
int vega20_reg_base_init(struct amdgpu_device *adev)
{
/* HW has more IP blocks; only initialize the blocks needed by our driver */
uint32_t i;
for (i = 0 ; i < MAX_INSTANCE ; ++i) {
adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCE_BASE.instance[i]));
adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
}
return 0;
}
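vega20_reg_base_init() only fills adev->reg_offset; something still has to select it per ASIC during early init. A minimal sketch of the expected hookup, assuming the same per-ASIC switch that soc15.c already uses for vega10_reg_base_init; the surrounding code is illustrative only and not part of this patch:

	/* Illustrative only: choose the per-ASIC register offset tables. */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}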
...@@ -34,10 +34,4 @@ config DEBUG_KERNEL_DC ...@@ -34,10 +34,4 @@ config DEBUG_KERNEL_DC
if you want to hit if you want to hit
kdgb_break in assert. kdgb_break in assert.
config DRM_AMD_DC_VEGAM
bool "VEGAM support"
depends on DRM_AMD_DC
help
Choose this option if you want to have
VEGAM support for display engine
endmenu endmenu
...@@ -911,6 +911,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) ...@@ -911,6 +911,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
drm_mode_connector_update_edid_property(connector, NULL); drm_mode_connector_update_edid_property(connector, NULL);
aconnector->num_modes = 0; aconnector->num_modes = 0;
aconnector->dc_sink = NULL; aconnector->dc_sink = NULL;
aconnector->edid = NULL;
} }
mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&dev->mode_config.mutex);
...@@ -1115,6 +1116,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) ...@@ -1115,6 +1116,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA10 || if (adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_VEGA12 || adev->asic_type == CHIP_VEGA12 ||
adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_RAVEN) adev->asic_type == CHIP_RAVEN)
client_id = SOC15_IH_CLIENTID_DCE; client_id = SOC15_IH_CLIENTID_DCE;
...@@ -1513,11 +1515,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) ...@@ -1513,11 +1515,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_POLARIS11: case CHIP_POLARIS11:
case CHIP_POLARIS10: case CHIP_POLARIS10:
case CHIP_POLARIS12: case CHIP_POLARIS12:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case CHIP_VEGAM: case CHIP_VEGAM:
#endif
case CHIP_VEGA10: case CHIP_VEGA10:
case CHIP_VEGA12: case CHIP_VEGA12:
case CHIP_VEGA20:
if (dce110_register_irq_handlers(dm->adev)) { if (dce110_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n"); DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail; goto fail;
...@@ -1708,9 +1709,7 @@ static int dm_early_init(void *handle) ...@@ -1708,9 +1709,7 @@ static int dm_early_init(void *handle)
adev->mode_info.plane_type = dm_plane_type_default; adev->mode_info.plane_type = dm_plane_type_default;
break; break;
case CHIP_POLARIS10: case CHIP_POLARIS10:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case CHIP_VEGAM: case CHIP_VEGAM:
#endif
adev->mode_info.num_crtc = 6; adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6; adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6; adev->mode_info.num_dig = 6;
...@@ -1718,6 +1717,7 @@ static int dm_early_init(void *handle) ...@@ -1718,6 +1717,7 @@ static int dm_early_init(void *handle)
break; break;
case CHIP_VEGA10: case CHIP_VEGA10:
case CHIP_VEGA12: case CHIP_VEGA12:
case CHIP_VEGA20:
adev->mode_info.num_crtc = 6; adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6; adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6; adev->mode_info.num_dig = 6;
...@@ -1966,6 +1966,7 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, ...@@ -1966,6 +1966,7 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
if (adev->asic_type == CHIP_VEGA10 || if (adev->asic_type == CHIP_VEGA10 ||
adev->asic_type == CHIP_VEGA12 || adev->asic_type == CHIP_VEGA12 ||
adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_RAVEN) { adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */ /* Fill GFX9 params */
plane_state->tiling_info.gfx9.num_pipes = plane_state->tiling_info.gfx9.num_pipes =
......
...@@ -88,9 +88,9 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut, ...@@ -88,9 +88,9 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
g = drm_color_lut_extract(lut[i].green, 16); g = drm_color_lut_extract(lut[i].green, 16);
b = drm_color_lut_extract(lut[i].blue, 16); b = drm_color_lut_extract(lut[i].blue, 16);
gamma->entries.red[i] = dal_fixed31_32_from_int(r); gamma->entries.red[i] = dc_fixpt_from_int(r);
gamma->entries.green[i] = dal_fixed31_32_from_int(g); gamma->entries.green[i] = dc_fixpt_from_int(g);
gamma->entries.blue[i] = dal_fixed31_32_from_int(b); gamma->entries.blue[i] = dc_fixpt_from_int(b);
} }
return; return;
} }
...@@ -101,9 +101,9 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut, ...@@ -101,9 +101,9 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut,
g = drm_color_lut_extract(lut[i].green, 16); g = drm_color_lut_extract(lut[i].green, 16);
b = drm_color_lut_extract(lut[i].blue, 16); b = drm_color_lut_extract(lut[i].blue, 16);
gamma->entries.red[i] = dal_fixed31_32_from_fraction(r, MAX_DRM_LUT_VALUE); gamma->entries.red[i] = dc_fixpt_from_fraction(r, MAX_DRM_LUT_VALUE);
gamma->entries.green[i] = dal_fixed31_32_from_fraction(g, MAX_DRM_LUT_VALUE); gamma->entries.green[i] = dc_fixpt_from_fraction(g, MAX_DRM_LUT_VALUE);
gamma->entries.blue[i] = dal_fixed31_32_from_fraction(b, MAX_DRM_LUT_VALUE); gamma->entries.blue[i] = dc_fixpt_from_fraction(b, MAX_DRM_LUT_VALUE);
} }
} }
...@@ -208,7 +208,7 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc) ...@@ -208,7 +208,7 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
for (i = 0; i < 12; i++) { for (i = 0; i < 12; i++) {
/* Skip 4th element */ /* Skip 4th element */
if (i % 4 == 3) { if (i % 4 == 3) {
stream->gamut_remap_matrix.matrix[i] = dal_fixed31_32_zero; stream->gamut_remap_matrix.matrix[i] = dc_fixpt_zero;
continue; continue;
} }
......
...@@ -330,11 +330,6 @@ bool dm_helpers_dp_mst_send_payload_allocation( ...@@ -330,11 +330,6 @@ bool dm_helpers_dp_mst_send_payload_allocation(
return true; return true;
} }
bool dm_helpers_dc_conn_log(struct dc_context *ctx, struct log_entry *entry, enum dc_log_type event)
{
return true;
}
void dm_dtn_log_begin(struct dc_context *ctx) void dm_dtn_log_begin(struct dc_context *ctx)
{} {}
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
# It provides the general basic services required by other DAL # It provides the general basic services required by other DAL
# subcomponents. # subcomponents.
BASICS = conversion.o fixpt31_32.o fixpt32_32.o \ BASICS = conversion.o fixpt31_32.o \
logger.o log_helpers.o vector.o logger.o log_helpers.o vector.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS)) AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
......
...@@ -94,7 +94,6 @@ void dc_conn_log(struct dc_context *ctx, ...@@ -94,7 +94,6 @@ void dc_conn_log(struct dc_context *ctx,
dm_logger_append(&entry, "%2.2X ", hex_data[i]); dm_logger_append(&entry, "%2.2X ", hex_data[i]);
dm_logger_append(&entry, "^\n"); dm_logger_append(&entry, "^\n");
dm_helpers_dc_conn_log(ctx, &entry, event);
fail: fail:
dm_logger_close(&entry); dm_logger_close(&entry);
......
...@@ -61,7 +61,7 @@ static const struct dc_log_type_info log_type_info_tbl[] = { ...@@ -61,7 +61,7 @@ static const struct dc_log_type_info log_type_info_tbl[] = {
{LOG_EVENT_UNDERFLOW, "Underflow"}, {LOG_EVENT_UNDERFLOW, "Underflow"},
{LOG_IF_TRACE, "InterfaceTrace"}, {LOG_IF_TRACE, "InterfaceTrace"},
{LOG_DTN, "DTN"}, {LOG_DTN, "DTN"},
{LOG_PROFILING, "Profiling"} {LOG_DISPLAYSTATS, "DisplayStats"}
}; };
...@@ -402,3 +402,4 @@ void dm_logger_close(struct log_entry *entry) ...@@ -402,3 +402,4 @@ void dm_logger_close(struct log_entry *entry)
entry->max_buf_bytes = 0; entry->max_buf_bytes = 0;
} }
} }
...@@ -1330,6 +1330,9 @@ static enum bp_result bios_parser_get_firmware_info( ...@@ -1330,6 +1330,9 @@ static enum bp_result bios_parser_get_firmware_info(
case 2: case 2:
result = get_firmware_info_v3_2(bp, info); result = get_firmware_info_v3_2(bp, info);
break; break;
case 3:
result = get_firmware_info_v3_2(bp, info);
break;
default: default:
break; break;
} }
......
...@@ -51,9 +51,7 @@ bool dal_bios_parser_init_cmd_tbl_helper( ...@@ -51,9 +51,7 @@ bool dal_bios_parser_init_cmd_tbl_helper(
return true; return true;
case DCE_VERSION_11_2: case DCE_VERSION_11_2:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case DCE_VERSION_11_22: case DCE_VERSION_11_22:
#endif
*h = dal_cmd_tbl_helper_dce112_get_table(); *h = dal_cmd_tbl_helper_dce112_get_table();
return true; return true;
......
...@@ -52,9 +52,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2( ...@@ -52,9 +52,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
return true; return true;
case DCE_VERSION_11_2: case DCE_VERSION_11_2:
#if defined(CONFIG_DRM_AMD_DC_VEGAM)
case DCE_VERSION_11_22: case DCE_VERSION_11_22:
#endif
*h = dal_cmd_tbl_helper_dce112_get_table2(); *h = dal_cmd_tbl_helper_dce112_get_table2();
return true; return true;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0) #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
......
...@@ -11,8 +11,6 @@ ...@@ -11,8 +11,6 @@
#include "dc_link_dp.h" #include "dc_link_dp.h"
#include "dc_link_ddc.h" #include "dc_link_ddc.h"
#include "dm_helpers.h" #include "dm_helpers.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
#include "dpcd_defs.h" #include "dpcd_defs.h"
enum dc_status core_link_read_dpcd( enum dc_status core_link_read_dpcd(
......