Commit 4efdddbc authored by Daniel Vetter


Merge tag 'amd-drm-next-5.17-2022-01-12' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.17-2022-01-12:

amdgpu:
- SR-IOV fixes
- Suspend/resume fixes
- Display fixes
- DMCUB fixes
- DP alt mode fixes
- RAS fixes
- UBSAN fix
- Navy Flounder VCN fix
- ttm resource manager cleanup
- default_groups change for kobj_type
- vkms fix
- Aldebaran fixes

amdkfd:
- SDMA ECC interrupt fix
- License clarification
- Pointer check fix
- DQM fixes for hawaii
- default_groups change for kobj_type
- Typo fixes
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220113030537.5758-1-alexander.deucher@amd.com
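
Most of the hunks below implement the "ttm resource manager cleanup" noted above: the VRAM/GTT usage helpers now take the driver-side manager struct directly instead of a struct ttm_resource_manager * that they converted back internally. An illustrative before/after sketch, taken from the call sites changed below (not new API):

    /* before: look the manager up through TTM, then query it */
    struct ttm_resource_manager *vram_man =
        ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
    u64 used = amdgpu_vram_mgr_usage(vram_man);

    /* after: pass the amdgpu-side manager directly */
    u64 used = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);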
...@@ -514,13 +514,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
     return r;
 }
-uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev)
-{
-    struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
-    return amdgpu_vram_mgr_usage(vram_man);
-}
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
                                           struct amdgpu_device *src)
 {
......
...@@ -223,7 +223,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
                                   uint64_t *bo_size, void *metadata_buffer,
                                   size_t buffer_size, uint32_t *metadata_size,
                                   uint32_t *flags);
-uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev);
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
                                           struct amdgpu_device *src);
 int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
......
...@@ -298,7 +298,6 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 {
     s64 time_us, increment_us;
     u64 free_vram, total_vram, used_vram;
-    struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
     /* Allow a maximum of 200 accumulated ms. This is basically per-IB
      * throttling.
      *
...@@ -315,7 +314,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
     }
     total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
-    used_vram = amdgpu_vram_mgr_usage(vram_man);
+    used_vram = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
     free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
     spin_lock(&adev->mm_stats.lock);
...@@ -362,7 +361,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
     if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
         u64 total_vis_vram = adev->gmc.visible_vram_size;
         u64 used_vis_vram =
-            amdgpu_vram_mgr_vis_usage(vram_man);
+            amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
         if (used_vis_vram < total_vis_vram) {
             u64 free_vis_vram = total_vis_vram - used_vis_vram;
......
...@@ -552,7 +552,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
 }
 /**
- * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
+ * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
  *
  * this function is invoked only the debugfs register access
  */
...@@ -567,6 +567,8 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
         adev->gfx.rlc.funcs->is_rlcg_access_range) {
         if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
             return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
+    } else if ((reg * 4) >= adev->rmmio_size) {
+        adev->pcie_wreg(adev, reg * 4, v);
     } else {
         writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
     }
...@@ -1448,7 +1450,7 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
         adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
         break;
     default:
-        return -EINVAL;
+        break;
     }
     return 0;
...@@ -3496,9 +3498,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
     mutex_init(&adev->psp.mutex);
     mutex_init(&adev->notifier_lock);
-    r = amdgpu_device_init_apu_flags(adev);
-    if (r)
-        return r;
+    amdgpu_device_init_apu_flags(adev);
     r = amdgpu_device_check_arguments(adev);
     if (r)
...@@ -3833,6 +3833,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
 {
     /* Clear all CPU mappings pointing to this device */
     unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
...@@ -3913,6 +3914,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 {
+    int idx;
     amdgpu_fence_driver_sw_fini(adev);
     amdgpu_device_ip_fini(adev);
     release_firmware(adev->firmware.gpu_info_fw);
...@@ -3937,6 +3940,14 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
     if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
         vga_client_unregister(adev->pdev);
+    if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+        iounmap(adev->rmmio);
+        adev->rmmio = NULL;
+        amdgpu_device_doorbell_fini(adev);
+        drm_dev_exit(idx);
+    }
     if (IS_ENABLED(CONFIG_PERF_EVENTS))
         amdgpu_pmu_fini(adev);
     if (adev->mman.discovery_bin)
...@@ -3957,8 +3968,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
  */
 static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
 {
-    /* No need to evict vram on APUs for suspend to ram */
-    if (adev->in_s3 && (adev->flags & AMD_IS_APU))
+    /* No need to evict vram on APUs for suspend to ram or s2idle */
+    if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
         return;
     if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
...@@ -4005,16 +4016,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
     if (!adev->in_s0ix)
         amdgpu_amdkfd_suspend(adev, adev->in_runpm);
-    /* First evict vram memory */
     amdgpu_device_evict_resources(adev);
     amdgpu_fence_driver_hw_fini(adev);
     amdgpu_device_ip_suspend_phase2(adev);
-    /* This second call to evict device resources is to evict
-     * the gart page table using the CPU.
-     */
-    amdgpu_device_evict_resources(adev);
     return 0;
 }
...@@ -4359,8 +4365,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
         goto error;
     amdgpu_virt_init_data_exchange(adev);
-    /* we need recover gart prior to run SMC/CP/SDMA resume */
-    amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
     r = amdgpu_device_fw_loading(adev);
     if (r)
...@@ -4680,10 +4684,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
                     amdgpu_inc_vram_lost(tmp_adev);
                 }
-                r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
-                if (r)
-                    goto out;
                 r = amdgpu_device_fw_loading(tmp_adev);
                 if (r)
                     return r;
......
...@@ -550,7 +550,8 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
     }
     /* some IP discovery tables on Navy Flounder don't have this set correctly */
     if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
-        (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)))
+        (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)) &&
+        (adev->pdev->revision != 0xFF))
         adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
     if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
         adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
......
...@@ -2166,9 +2166,9 @@ static int amdgpu_pmops_suspend(struct device *dev)
     if (amdgpu_acpi_is_s0ix_active(adev))
         adev->in_s0ix = true;
-    adev->in_s3 = true;
+    else
+        adev->in_s3 = true;
     r = amdgpu_device_suspend(drm_dev, true);
-    adev->in_s3 = false;
     if (r)
         return r;
     if (!adev->in_s0ix)
...@@ -2189,6 +2189,8 @@ static int amdgpu_pmops_resume(struct device *dev)
     r = amdgpu_device_resume(drm_dev, true);
     if (amdgpu_acpi_is_s0ix_active(adev))
         adev->in_s0ix = false;
+    else
+        adev->in_s3 = false;
     return r;
 }
......
...@@ -114,80 +114,12 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
  */
 int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
 {
-    int r;
-    if (adev->gart.bo == NULL) {
-        struct amdgpu_bo_param bp;
-        memset(&bp, 0, sizeof(bp));
-        bp.size = adev->gart.table_size;
-        bp.byte_align = PAGE_SIZE;
-        bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
-        bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-            AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
-        bp.type = ttm_bo_type_kernel;
-        bp.resv = NULL;
-        bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-        r = amdgpu_bo_create(adev, &bp, &adev->gart.bo);
-        if (r) {
-            return r;
-        }
-    }
-    return 0;
-}
-/**
- * amdgpu_gart_table_vram_pin - pin gart page table in vram
- *
- * @adev: amdgpu_device pointer
- *
- * Pin the GART page table in vram so it will not be moved
- * by the memory manager (pcie r4xx, r5xx+). These asics require the
- * gart table to be in video memory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
-{
-    int r;
-    r = amdgpu_bo_reserve(adev->gart.bo, false);
-    if (unlikely(r != 0))
-        return r;
-    r = amdgpu_bo_pin(adev->gart.bo, AMDGPU_GEM_DOMAIN_VRAM);
-    if (r) {
-        amdgpu_bo_unreserve(adev->gart.bo);
-        return r;
-    }
-    r = amdgpu_bo_kmap(adev->gart.bo, &adev->gart.ptr);
-    if (r)
-        amdgpu_bo_unpin(adev->gart.bo);
-    amdgpu_bo_unreserve(adev->gart.bo);
-    return r;
-}
-/**
- * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
- *
- * @adev: amdgpu_device pointer
- *
- * Unpin the GART page table in vram (pcie r4xx, r5xx+).
- * These asics require the gart table to be in video memory.
- */
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
-{
-    int r;
-    if (adev->gart.bo == NULL) {
-        return;
-    }
-    r = amdgpu_bo_reserve(adev->gart.bo, true);
-    if (likely(r == 0)) {
-        amdgpu_bo_kunmap(adev->gart.bo);
-        amdgpu_bo_unpin(adev->gart.bo);
-        amdgpu_bo_unreserve(adev->gart.bo);
-        adev->gart.ptr = NULL;
-    }
+    if (adev->gart.bo != NULL)
+        return 0;
+    return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
+                                   AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
+                                   NULL, (void *)&adev->gart.ptr);
 }
 /**
...@@ -201,11 +133,7 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
  */
 void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
 {
-    if (adev->gart.bo == NULL) {
-        return;
-    }
-    amdgpu_bo_unref(&adev->gart.bo);
-    adev->gart.ptr = NULL;
+    amdgpu_bo_free_kernel(&adev->gart.bo, NULL, (void *)&adev->gart.ptr);
 }
 /*
......
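The GART rewrite above leans on amdgpu_bo_create_kernel(), which creates, pins and CPU-maps a kernel BO in a single call, so the dedicated table pin/unpin helpers are no longer needed; amdgpu_bo_free_kernel() undoes all three steps on teardown. A minimal sketch of the pairing, with the arguments taken from the hunk above:

    /* allocate: create + pin + kmap the GART table in VRAM */
    r = amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
                                NULL, (void *)&adev->gart.ptr);

    /* free: kunmap + unpin + unref in one call */
    amdgpu_bo_free_kernel(&adev->gart.bo, NULL, (void *)&adev->gart.ptr);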
...@@ -264,9 +264,6 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
         !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
         vma->vm_flags &= ~VM_MAYWRITE;
-    if (bo->kfd_bo)
-        vma->vm_flags |= VM_DONTCOPY;
     return drm_gem_ttm_mmap(obj, vma);
 }
......
...@@ -77,10 +77,8 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
 {
     struct drm_device *ddev = dev_get_drvdata(dev);
     struct amdgpu_device *adev = drm_to_adev(ddev);
-    struct ttm_resource_manager *man;
-    man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
-    return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(man));
+    return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr));
 }
 static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
...@@ -206,30 +204,27 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
 /**
  * amdgpu_gtt_mgr_usage - return usage of GTT domain
  *
- * @man: TTM memory type manager
+ * @mgr: amdgpu_gtt_mgr pointer
  *
  * Return how many bytes are used in the GTT domain
  */
-uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
+uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr)
 {
-    struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
     return atomic64_read(&mgr->used) * PAGE_SIZE;
 }
 /**
  * amdgpu_gtt_mgr_recover - re-init gart
  *
- * @man: TTM memory type manager
+ * @mgr: amdgpu_gtt_mgr pointer
  *
  * Re-init the gart for each known BO in the GTT.
  */
-int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
+int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
 {
-    struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-    struct amdgpu_device *adev;
     struct amdgpu_gtt_node *node;
     struct drm_mm_node *mm_node;
+    struct amdgpu_device *adev;
     int r = 0;
     adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
......
...@@ -672,13 +672,13 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
         return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
     case AMDGPU_INFO_VRAM_USAGE:
-        ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
+        ui64 = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
         return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
     case AMDGPU_INFO_VIS_VRAM_USAGE:
-        ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
+        ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
         return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
     case AMDGPU_INFO_GTT_USAGE:
-        ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
+        ui64 = amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr);
         return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
     case AMDGPU_INFO_GDS_CONFIG: {
         struct drm_amdgpu_info_gds gds_info;
...@@ -709,8 +709,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
     }
     case AMDGPU_INFO_MEMORY: {
         struct drm_amdgpu_memory_info mem;
-        struct ttm_resource_manager *vram_man =
-            ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
         struct ttm_resource_manager *gtt_man =
             ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
         memset(&mem, 0, sizeof(mem));
...@@ -719,7 +717,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
             atomic64_read(&adev->vram_pin_size) -
             AMDGPU_VM_RESERVED_VRAM;
         mem.vram.heap_usage =
-            amdgpu_vram_mgr_usage(vram_man);
+            amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
         mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
         mem.cpu_accessible_vram.total_heap_size =
...@@ -729,7 +727,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
             atomic64_read(&adev->visible_pin_size),
             mem.vram.usable_heap_size);
         mem.cpu_accessible_vram.heap_usage =
-            amdgpu_vram_mgr_vis_usage(vram_man);
+            amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
         mem.cpu_accessible_vram.max_allocation =
             mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
...@@ -738,7 +736,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
             atomic64_read(&adev->gart_pin_size);
         mem.gtt.heap_usage =
-            amdgpu_gtt_mgr_usage(gtt_man);
+            amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr);
         mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
         return copy_to_user(out, &mem,
......
...@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/dma-buf.h>
+#include <drm/drm_drv.h>
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_cache.h>
 #include "amdgpu.h"
...@@ -1061,7 +1062,18 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
  */
 void amdgpu_bo_fini(struct amdgpu_device *adev)
 {
+    int idx;
     amdgpu_ttm_fini(adev);
+    if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+        if (!adev->gmc.xgmi.connected_to_cpu) {
+            arch_phys_wc_del(adev->gmc.vram_mtrr);
+            arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+        }
+        drm_dev_exit(idx);
+    }
 }
 /**
......
...@@ -1592,6 +1592,7 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
             /* Let IP handle its data, maybe we need get the output
              * from the callback to udpate the error type/count, etc
              */
+            memset(&err_data, 0, sizeof(err_data));
             ret = data->cb(obj->adev, &err_data, &entry);
             /* ue will trigger an interrupt, and in that case
              * we need do a reset to recovery the whole system.
...@@ -1838,8 +1839,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
             .size = AMDGPU_GPU_PAGE_SIZE,
             .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
         };
-        status = amdgpu_vram_mgr_query_page_status(
-                ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
+        status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
                 data->bps[i].retired_page);
         if (status == -EBUSY)
             (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
...@@ -1940,8 +1940,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
             goto out;
         }
-        amdgpu_vram_mgr_reserve_range(
-            ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
+        amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
             bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
             AMDGPU_GPU_PAGE_SIZE);
......
...@@ -43,6 +43,7 @@
 #include <linux/sizes.h>
 #include <linux/module.h>
+#include <drm/drm_drv.h>
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
...@@ -1804,6 +1805,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
  */
 void amdgpu_ttm_fini(struct amdgpu_device *adev)
 {
+    int idx;
     if (!adev->mman.initialized)
         return;
...@@ -1818,6 +1820,15 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
                       NULL, NULL);
     amdgpu_ttm_fw_reserve_vram_fini(adev);
+    if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+        if (adev->mman.aper_base_kaddr)
+            iounmap(adev->mman.aper_base_kaddr);
+        adev->mman.aper_base_kaddr = NULL;
+        drm_dev_exit(idx);
+    }
     amdgpu_vram_mgr_fini(adev);
     amdgpu_gtt_mgr_fini(adev);
     amdgpu_preempt_mgr_fini(adev);
......
...@@ -114,8 +114,8 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
 void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
 bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
-uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man);
-int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
+uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr);
+int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);
 uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);
...@@ -129,11 +129,11 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
 void amdgpu_vram_mgr_free_sgt(struct device *dev,
                               enum dma_data_direction dir,
                               struct sg_table *sgt);
-uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man);
-uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man);
-int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
+uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr);
+uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr);
+int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
                                   uint64_t start, uint64_t size);
-int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
+int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
                                       uint64_t start);
 int amdgpu_ttm_init(struct amdgpu_device *adev);
......
...@@ -553,7 +553,6 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
 static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
 {
     struct amd_sriov_msg_vf2pf_info *vf2pf_info;
-    struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
     vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
...@@ -576,8 +575,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
     vf2pf_info->driver_cert = 0;
     vf2pf_info->os_info.all = 0;
-    vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20;
-    vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20;
+    vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr) >> 20;
+    vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
     vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
     vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
...@@ -727,6 +726,10 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
         vi_set_virt_ops(adev);
         break;
     case CHIP_VEGA10:
+        soc15_set_virt_ops(adev);
+        /* send a dummy GPU_INIT_DATA request to host on vega10 */
+        amdgpu_virt_request_init_data(adev);
+        break;
     case CHIP_VEGA20:
     case CHIP_ARCTURUS:
     case CHIP_ALDEBARAN:
......
...@@ -144,15 +144,16 @@ static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
 static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
                                           struct drm_atomic_state *state)
 {
+    unsigned long flags;
     if (crtc->state->event) {
-        spin_lock(&crtc->dev->event_lock);
+        spin_lock_irqsave(&crtc->dev->event_lock, flags);
         if (drm_crtc_vblank_get(crtc) != 0)
             drm_crtc_send_vblank_event(crtc, crtc->state->event);
         else
             drm_crtc_arm_vblank_event(crtc, crtc->state->event);
-        spin_unlock(&crtc->dev->event_lock);
+        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
         crtc->state->event = NULL;
     }
......
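The vkms fix above switches to the IRQ-saving lock variants because the DRM event_lock is also taken from interrupt context by the vblank handling code, so a plain spin_lock() in atomic_flush could deadlock if the interrupt fired on the same CPU. The resulting pattern, as a short sketch:

    unsigned long flags;

    spin_lock_irqsave(&crtc->dev->event_lock, flags);
    /* queue or send the pending vblank event while local interrupts are off */
    spin_unlock_irqrestore(&crtc->dev->event_lock, flags);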
...@@ -96,10 +96,9 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
 {
     struct drm_device *ddev = dev_get_drvdata(dev);
     struct amdgpu_device *adev = drm_to_adev(ddev);
-    struct ttm_resource_manager *man;
-    man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
-    return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man));
+    return sysfs_emit(buf, "%llu\n",
+                      amdgpu_vram_mgr_usage(&adev->mman.vram_mgr));
 }
 /**
...@@ -116,10 +115,9 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
 {
     struct drm_device *ddev = dev_get_drvdata(dev);
     struct amdgpu_device *adev = drm_to_adev(ddev);
-    struct ttm_resource_manager *man;
-    man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
-    return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man));
+    return sysfs_emit(buf, "%llu\n",
+                      amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
 }
 /**
...@@ -263,16 +261,15 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
 /**
  * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
  *
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
  * @start: start address of the range in VRAM
  * @size: size of the range
  *
- * Reserve memory from start addess with the specified size in VRAM
+ * Reserve memory from start address with the specified size in VRAM
  */
-int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
+int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
                                   uint64_t start, uint64_t size)
 {
-    struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
     struct amdgpu_vram_reservation *rsv;
     rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
...@@ -285,7 +282,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
     spin_lock(&mgr->lock);
     list_add_tail(&mgr->reservations_pending, &rsv->node);
-    amdgpu_vram_mgr_do_reserve(man);
+    amdgpu_vram_mgr_do_reserve(&mgr->manager);
     spin_unlock(&mgr->lock);
     return 0;
...@@ -294,7 +291,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
 /**
  * amdgpu_vram_mgr_query_page_status - query the reservation status
  *
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
  * @start: start address of a page in VRAM
  *
  * Returns:
...@@ -302,10 +299,9 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
  * 0: the page has been reserved
  * -ENOENT: the input page is not a reservation
  */
-int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
+int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
                                       uint64_t start)
 {
-    struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
     struct amdgpu_vram_reservation *rsv;
     int ret;
...@@ -632,28 +628,24 @@ void amdgpu_vram_mgr_free_sgt(struct device *dev,
 /**
  * amdgpu_vram_mgr_usage - how many bytes are used in this domain
  *
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
  *
  * Returns how many bytes are used in this domain.
  */
-uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
+uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr)
 {
-    struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
     return atomic64_read(&mgr->usage);
 }
 /**
  * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
  *
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
  *
  * Returns how many bytes are used in the visible part of VRAM
  */
-uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man)
+uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
 {
-    struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
     return atomic64_read(&mgr->vis_usage);
 }
...@@ -675,8 +667,8 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
     spin_unlock(&mgr->lock);
     drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
-               man->size, amdgpu_vram_mgr_usage(man) >> 20,
-               amdgpu_vram_mgr_vis_usage(man) >> 20);
+               man->size, amdgpu_vram_mgr_usage(mgr) >> 20,
+               amdgpu_vram_mgr_vis_usage(mgr) >> 20);
 }
 static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
......
...@@ -208,6 +208,7 @@ static struct attribute *amdgpu_xgmi_hive_attrs[] = {
     &amdgpu_xgmi_hive_id,
     NULL
 };
+ATTRIBUTE_GROUPS(amdgpu_xgmi_hive);
 static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
                                       struct attribute *attr, char *buf)
...@@ -237,7 +238,7 @@ static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
 struct kobj_type amdgpu_xgmi_hive_type = {
     .release = amdgpu_xgmi_hive_release,
     .sysfs_ops = &amdgpu_xgmi_hive_ops,
-    .default_attrs = amdgpu_xgmi_hive_attrs,
+    .default_groups = amdgpu_xgmi_hive_groups,
 };
 static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
......
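The kobj_type change above follows the kernel-wide removal of .default_attrs: ATTRIBUTE_GROUPS(name) turns an existing name_attrs[] array into a name_groups[] attribute-group list that plugs into .default_groups. A minimal sketch of the pattern with hypothetical my_obj names (not taken from this driver):

    static struct attribute my_obj_foo = { .name = "foo", .mode = 0444 };

    static struct attribute *my_obj_attrs[] = {
        &my_obj_foo,
        NULL
    };
    ATTRIBUTE_GROUPS(my_obj);        /* emits my_obj_group and my_obj_groups[] */

    static struct kobj_type my_obj_type = {
        .sysfs_ops      = &my_obj_sysfs_ops,   /* assumed defined elsewhere */
        .default_groups = my_obj_groups,       /* was: .default_attrs = my_obj_attrs */
    };

The same conversion is applied to the KFD procfs queue type further down.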
...@@ -989,7 +989,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
     if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
         goto skip_pin_bo;
-    r = amdgpu_gart_table_vram_pin(adev);
+    r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
     if (r)
         return r;
...@@ -1060,7 +1060,6 @@ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
 {
     adev->gfxhub.funcs->gart_disable(adev);
     adev->mmhub.funcs->gart_disable(adev);
-    amdgpu_gart_table_vram_unpin(adev);
 }
 static int gmc_v10_0_hw_fini(void *handle)
......
...@@ -476,7 +476,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
         dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
         return -EINVAL;
     }
-    r = amdgpu_gart_table_vram_pin(adev);
+    r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
     if (r)
         return r;
...@@ -608,7 +608,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
     WREG32(mmVM_L2_CNTL3,
            VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
            (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
-    amdgpu_gart_table_vram_unpin(adev);
 }
 static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
......
...@@ -620,7 +620,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
         dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
         return -EINVAL;
     }
-    r = amdgpu_gart_table_vram_pin(adev);
+    r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
     if (r)
         return r;
...@@ -758,7 +758,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
     tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
     WREG32(mmVM_L2_CNTL, tmp);
     WREG32(mmVM_L2_CNTL2, 0);
-    amdgpu_gart_table_vram_unpin(adev);
 }
 /**
......
...@@ -844,7 +844,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
         dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
         return -EINVAL;
     }
-    r = amdgpu_gart_table_vram_pin(adev);
+    r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
     if (r)
         return r;
...@@ -999,7 +999,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
     tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
     WREG32(mmVM_L2_CNTL, tmp);
     WREG32(mmVM_L2_CNTL2, 0);
-    amdgpu_gart_table_vram_unpin(adev);
 }
 /**
......
...@@ -72,6 +72,9 @@
 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
 #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2
 static const char *gfxhub_client_ids[] = {
     "CB",
...@@ -1134,6 +1137,8 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
     u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
     unsigned size;
+    /* TODO move to DC so GMC doesn't need to hard-code DCN registers */
     if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
         size = AMDGPU_VBIOS_VGA_ALLOCATION;
     } else {
...@@ -1142,7 +1147,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
         switch (adev->ip_versions[DCE_HWIP][0]) {
         case IP_VERSION(1, 0, 0):
         case IP_VERSION(1, 0, 1):
-        case IP_VERSION(2, 1, 0):
             viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
             size = (REG_GET_FIELD(viewport,
                                   HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
...@@ -1150,6 +1154,14 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
                                   HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
                     4);
             break;
+        case IP_VERSION(2, 1, 0):
+            viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
+            size = (REG_GET_FIELD(viewport,
+                                  HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+                    REG_GET_FIELD(viewport,
+                                  HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
+                    4);
+            break;
         default:
             viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
             size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
...@@ -1743,7 +1755,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
     if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
         goto skip_pin_bo;
-    r = amdgpu_gart_table_vram_pin(adev);
+    r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
     if (r)
         return r;
...@@ -1821,7 +1833,6 @@ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
 {
     adev->gfxhub.funcs->gart_disable(adev);
     adev->mmhub.funcs->gart_disable(adev);
-    amdgpu_gart_table_vram_unpin(adev);
 }
 static int gmc_v9_0_hw_fini(void *handle)
......
...@@ -180,6 +180,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
                 RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
                     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
         }
+    } else if (req == IDH_REQ_GPU_INIT_DATA){
+        /* Dummy REQ_GPU_INIT_DATA handling */
+        r = xgpu_ai_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY);
+        /* version set to 0 since dummy */
+        adev->virt.req_init_data_ver = 0;
     }
     return 0;
...@@ -381,10 +386,16 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
     amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
 }
+static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
+{
+    return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+}
 const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
     .req_full_gpu = xgpu_ai_request_full_gpu_access,
     .rel_full_gpu = xgpu_ai_release_full_gpu_access,
     .reset_gpu = xgpu_ai_request_reset,
     .wait_reset = NULL,
     .trans_msg = xgpu_ai_mailbox_trans_msg,
+    .req_init_data = xgpu_ai_request_init_data,
 };
...@@ -35,6 +35,7 @@ enum idh_request {
     IDH_REQ_GPU_FINI_ACCESS,
     IDH_REL_GPU_FINI_ACCESS,
     IDH_REQ_GPU_RESET_ACCESS,
+    IDH_REQ_GPU_INIT_DATA,
     IDH_LOG_VF_ERROR = 200,
     IDH_READY_TO_RESET = 201,
...@@ -48,6 +49,7 @@ enum idh_event {
     IDH_SUCCESS,
     IDH_FAIL,
     IDH_QUERY_ALIVE,
+    IDH_REQ_GPU_INIT_DATA_READY,
     IDH_TEXT_MESSAGE = 255,
 };
......
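For the SR-IOV fix above, Vega10 VFs now perform a dummy init-data handshake during early init. Roughly, the call flow implied by the amdgpu_virt.c and mxgpu_ai hunks (a sketch, not a literal excerpt):

    /* amdgpu_detect_virtualization(), CHIP_VEGA10 case */
    soc15_set_virt_ops(adev);               /* installs xgpu_ai_virt_ops */
    amdgpu_virt_request_init_data(adev);    /* calls virt_ops->req_init_data() */

    /* xgpu_ai_request_init_data() sends the mailbox request ... */
    xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
    /* ... which polls for IDH_REQ_GPU_INIT_DATA_READY and records
     * adev->virt.req_init_data_ver = 0, since the data is a dummy. */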
...@@ -1060,6 +1060,9 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
             return -ENODEV;
         /* same everything but the other direction */
         props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
+        if (!props2)
+            return -ENOMEM;
         props2->node_from = id_to;
         props2->node_to = id_from;
         props2->kobj = NULL;
......
...@@ -68,20 +68,20 @@ static void kfd_device_info_set_sdma_queue_num(struct kfd_dev *kfd)
     case IP_VERSION(4, 0, 1):/* VEGA12 */
     case IP_VERSION(4, 1, 0):/* RAVEN */
     case IP_VERSION(4, 1, 1):/* RAVEN */
-    case IP_VERSION(4, 1, 2):/* RENIOR */
+    case IP_VERSION(4, 1, 2):/* RENOIR */
     case IP_VERSION(5, 2, 1):/* VANGOGH */
     case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
         kfd->device_info.num_sdma_queues_per_engine = 2;
         break;
     case IP_VERSION(4, 2, 0):/* VEGA20 */
-    case IP_VERSION(4, 2, 2):/* ARCTUTUS */
+    case IP_VERSION(4, 2, 2):/* ARCTURUS */
     case IP_VERSION(4, 4, 0):/* ALDEBARAN */
     case IP_VERSION(5, 0, 0):/* NAVI10 */
     case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
     case IP_VERSION(5, 0, 2):/* NAVI14 */
     case IP_VERSION(5, 0, 5):/* NAVI12 */
     case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
-    case IP_VERSION(5, 2, 2):/* NAVY_FLOUDER */
+    case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
     case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
     case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
         kfd->device_info.num_sdma_queues_per_engine = 8;
......
...@@ -1004,14 +1004,17 @@ static void uninitialize(struct device_queue_manager *dqm)
 static int start_nocpsch(struct device_queue_manager *dqm)
 {
+    int r = 0;
     pr_info("SW scheduler is used");
     init_interrupts(dqm);
     if (dqm->dev->adev->asic_type == CHIP_HAWAII)
-        return pm_init(&dqm->packet_mgr, dqm);
-    dqm->sched_running = true;
-    return 0;
+        r = pm_init(&dqm->packet_mgr, dqm);
+    if (!r)
+        dqm->sched_running = true;
+    return r;
 }
 static int stop_nocpsch(struct device_queue_manager *dqm)
......
...@@ -197,6 +197,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
      */
     return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
            source_id == SOC15_INTSRC_SDMA_TRAP ||
+           source_id == SOC15_INTSRC_SDMA_ECC ||
            source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
            source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
            ((client_id == SOC15_IH_CLIENTID_VMC ||
......
...@@ -461,6 +461,7 @@ static struct attribute *procfs_queue_attrs[] = {
     &attr_queue_gpuid,
     NULL
 };
+ATTRIBUTE_GROUPS(procfs_queue);
 static const struct sysfs_ops procfs_queue_ops = {
     .show = kfd_procfs_queue_show,
...@@ -468,7 +469,7 @@ static const struct sysfs_ops procfs_queue_ops = {
 static struct kobj_type procfs_queue_type = {
     .sysfs_ops = &procfs_queue_ops,
-    .default_attrs = procfs_queue_attrs,
+    .default_groups = procfs_queue_groups,
 };
 static const struct sysfs_ops procfs_stats_ops = {
......
...@@ -107,7 +107,7 @@ static void svm_range_add_to_svms(struct svm_range *prange) ...@@ -107,7 +107,7 @@ static void svm_range_add_to_svms(struct svm_range *prange)
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
prange, prange->start, prange->last); prange, prange->start, prange->last);
list_add_tail(&prange->list, &prange->svms->list); list_move_tail(&prange->list, &prange->svms->list);
prange->it_node.start = prange->start; prange->it_node.start = prange->start;
prange->it_node.last = prange->last; prange->it_node.last = prange->last;
interval_tree_insert(&prange->it_node, &prange->svms->objects); interval_tree_insert(&prange->it_node, &prange->svms->objects);
...@@ -295,8 +295,6 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, ...@@ -295,8 +295,6 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
prange->last = last; prange->last = last;
INIT_LIST_HEAD(&prange->list); INIT_LIST_HEAD(&prange->list);
INIT_LIST_HEAD(&prange->update_list); INIT_LIST_HEAD(&prange->update_list);
INIT_LIST_HEAD(&prange->remove_list);
INIT_LIST_HEAD(&prange->insert_list);
INIT_LIST_HEAD(&prange->svm_bo_list); INIT_LIST_HEAD(&prange->svm_bo_list);
INIT_LIST_HEAD(&prange->deferred_list); INIT_LIST_HEAD(&prange->deferred_list);
INIT_LIST_HEAD(&prange->child_list); INIT_LIST_HEAD(&prange->child_list);
...@@ -1018,7 +1016,7 @@ svm_range_split_tail(struct svm_range *prange, ...@@ -1018,7 +1016,7 @@ svm_range_split_tail(struct svm_range *prange,
int r = svm_range_split(prange, prange->start, new_last, &tail); int r = svm_range_split(prange, prange->start, new_last, &tail);
if (!r) if (!r)
list_add(&tail->insert_list, insert_list); list_add(&tail->list, insert_list);
return r; return r;
} }
...@@ -1030,7 +1028,7 @@ svm_range_split_head(struct svm_range *prange, ...@@ -1030,7 +1028,7 @@ svm_range_split_head(struct svm_range *prange,
int r = svm_range_split(prange, new_start, prange->last, &head); int r = svm_range_split(prange, new_start, prange->last, &head);
if (!r) if (!r)
list_add(&head->insert_list, insert_list); list_add(&head->list, insert_list);
return r; return r;
} }
...@@ -1898,8 +1896,8 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, ...@@ -1898,8 +1896,8 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
goto out; goto out;
} }
list_add(&old->remove_list, remove_list); list_add(&old->update_list, remove_list);
list_add(&prange->insert_list, insert_list); list_add(&prange->list, insert_list);
list_add(&prange->update_list, update_list); list_add(&prange->update_list, update_list);
if (node->start < start) { if (node->start < start) {
...@@ -1931,7 +1929,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, ...@@ -1931,7 +1929,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
goto out; goto out;
} }
list_add(&prange->insert_list, insert_list); list_add(&prange->list, insert_list);
list_add(&prange->update_list, update_list); list_add(&prange->update_list, update_list);
} }
...@@ -1946,13 +1944,13 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, ...@@ -1946,13 +1944,13 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
r = -ENOMEM; r = -ENOMEM;
goto out; goto out;
} }
list_add(&prange->insert_list, insert_list); list_add(&prange->list, insert_list);
list_add(&prange->update_list, update_list); list_add(&prange->update_list, update_list);
} }
out: out:
if (r) if (r)
list_for_each_entry_safe(prange, tmp, insert_list, insert_list) list_for_each_entry_safe(prange, tmp, insert_list, list)
svm_range_free(prange); svm_range_free(prange);
return r; return r;
...@@ -3236,7 +3234,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, ...@@ -3236,7 +3234,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
goto out; goto out;
} }
/* Apply changes as a transaction */ /* Apply changes as a transaction */
list_for_each_entry_safe(prange, next, &insert_list, insert_list) { list_for_each_entry_safe(prange, next, &insert_list, list) {
svm_range_add_to_svms(prange); svm_range_add_to_svms(prange);
svm_range_add_notifier_locked(mm, prange); svm_range_add_notifier_locked(mm, prange);
} }
...@@ -3244,8 +3242,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, ...@@ -3244,8 +3242,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
svm_range_apply_attrs(p, prange, nattr, attrs); svm_range_apply_attrs(p, prange, nattr, attrs);
/* TODO: unmap ranges from GPU that lost access */ /* TODO: unmap ranges from GPU that lost access */
} }
list_for_each_entry_safe(prange, next, &remove_list, list_for_each_entry_safe(prange, next, &remove_list, update_list) {
remove_list) {
pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n", pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange, prange->start, prange->svms, prange, prange->start,
prange->last); prange->last);
......
...@@ -76,8 +76,6 @@ struct svm_work_list_item { ...@@ -76,8 +76,6 @@ struct svm_work_list_item {
* aligned, page size is (last - start + 1) * aligned, page size is (last - start + 1)
* @list: link list node, used to scan all ranges of svms * @list: link list node, used to scan all ranges of svms
* @update_list:link list node used to add to update_list * @update_list:link list node used to add to update_list
* @remove_list:link list node used to add to remove list
* @insert_list:link list node used to add to insert list
* @mapping: bo_va mapping structure to create and update GPU page table * @mapping: bo_va mapping structure to create and update GPU page table
* @npages: number of pages * @npages: number of pages
* @dma_addr: dma mapping address on each GPU for system memory physical page * @dma_addr: dma mapping address on each GPU for system memory physical page
...@@ -113,8 +111,6 @@ struct svm_range { ...@@ -113,8 +111,6 @@ struct svm_range {
struct interval_tree_node it_node; struct interval_tree_node it_node;
struct list_head list; struct list_head list;
struct list_head update_list; struct list_head update_list;
struct list_head remove_list;
struct list_head insert_list;
uint64_t npages; uint64_t npages;
dma_addr_t *dma_addr[MAX_GPU_INSTANCE]; dma_addr_t *dma_addr[MAX_GPU_INSTANCE];
struct ttm_resource *ttm_res; struct ttm_resource *ttm_res;
......
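Illustrative note (not part of the patch): the dedicated insert_list/remove_list heads can be dropped because a range is never linked on the long-lived svms list and on a temporary insert list at the same time, nor on the update list and the remove list at once, so the existing list and update_list nodes are reused for the temporary memberships and struct svm_range shrinks by two list_heads. A minimal userspace sketch of that invariant, with a hand-rolled intrusive list standing in for the kernel's list_head:

/* sketch only: hand-rolled stand-in for <linux/list.h>, not kernel code */
#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *h) { h->prev = h->next = h; }
static void list_add(struct node *n, struct node *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}
static void list_del(struct node *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	n->prev = n->next = n;
}

struct range {
	struct node list;        /* svms scan list OR temporary insert_list */
	struct node update_list; /* update_list OR temporary remove_list */
	int id;
};

int main(void)
{
	struct node insert_list, svms;
	struct range r = { .id = 1 };

	list_init(&insert_list);
	list_init(&svms);

	/* Phase 1: the candidate range sits only on the temporary insert list. */
	list_add(&r.list, &insert_list);

	/* Phase 2: once the transaction is applied, the same node moves to the
	 * long-lived svms list; the two memberships never coexist in time. */
	list_del(&r.list);
	list_add(&r.list, &svms);

	printf("range %d now linked into the svms list\n", r.id);
	return 0;
}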
...@@ -658,7 +658,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, ...@@ -658,7 +658,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
struct drm_connector_list_iter iter; struct drm_connector_list_iter iter;
struct dc_link *link; struct dc_link *link;
uint8_t link_index = 0; uint8_t link_index = 0;
struct drm_device *dev = adev->dm.ddev; struct drm_device *dev;
if (adev == NULL) if (adev == NULL)
return; return;
...@@ -675,6 +675,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, ...@@ -675,6 +675,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
link_index = notify->link_index; link_index = notify->link_index;
link = adev->dm.dc->links[link_index]; link = adev->dm.dc->links[link_index];
dev = adev->dm.ddev;
drm_connector_list_iter_begin(dev, &iter); drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) { drm_for_each_connector_iter(connector, &iter) {
...@@ -1161,6 +1162,32 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) ...@@ -1161,6 +1162,32 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0; return 0;
} }
static void dm_dmub_hw_resume(struct amdgpu_device *adev)
{
struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
enum dmub_status status;
bool init;
if (!dmub_srv) {
/* DMUB isn't supported on the ASIC. */
return;
}
status = dmub_srv_is_hw_init(dmub_srv, &init);
if (status != DMUB_STATUS_OK)
DRM_WARN("DMUB hardware init check failed: %d\n", status);
if (status == DMUB_STATUS_OK && init) {
/* Wait for firmware load to finish. */
status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
if (status != DMUB_STATUS_OK)
DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
} else {
/* Perform the full hardware initialization. */
dm_dmub_hw_init(adev);
}
}
#if defined(CONFIG_DRM_AMD_DC_DCN) #if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{ {
...@@ -2637,9 +2664,7 @@ static int dm_resume(void *handle) ...@@ -2637,9 +2664,7 @@ static int dm_resume(void *handle)
amdgpu_dm_outbox_init(adev); amdgpu_dm_outbox_init(adev);
/* Before powering on DC we need to re-initialize DMUB. */ /* Before powering on DC we need to re-initialize DMUB. */
r = dm_dmub_hw_init(adev); dm_dmub_hw_resume(adev);
if (r)
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
/* power on hardware */ /* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
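Note on the resume path above: dm_dmub_hw_resume() only re-runs the full dm_dmub_hw_init() sequence when dmub_srv_is_hw_init() reports the hardware as uninitialized; otherwise it just waits for firmware auto-load to complete, presumably to avoid re-programming a DMUB instance that kept running across the suspend cycle.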
...@@ -6073,6 +6098,7 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, ...@@ -6073,6 +6098,7 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
struct dsc_dec_dpcd_caps *dsc_caps) struct dsc_dec_dpcd_caps *dsc_caps)
{ {
stream->timing.flags.DSC = 0; stream->timing.flags.DSC = 0;
dsc_caps->is_dsc_supported = false;
if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
sink->sink_signal == SIGNAL_TYPE_EDP)) { sink->sink_signal == SIGNAL_TYPE_EDP)) {
...@@ -10737,6 +10763,8 @@ static int dm_update_plane_state(struct dc *dc, ...@@ -10737,6 +10763,8 @@ static int dm_update_plane_state(struct dc *dc,
dm_new_plane_state->dc_state = dc_new_plane_state; dm_new_plane_state->dc_state = dc_new_plane_state;
dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
/* Tell DC to do a full surface update every time there /* Tell DC to do a full surface update every time there
* is a plane change. Inefficient, but works for now. * is a plane change. Inefficient, but works for now.
*/ */
...@@ -10889,7 +10917,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ...@@ -10889,7 +10917,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
enum dc_status status; enum dc_status status;
int ret, i; int ret, i;
bool lock_and_validation_needed = false; bool lock_and_validation_needed = false;
struct dm_crtc_state *dm_old_crtc_state; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN) #if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_mst_fairness_vars vars[MAX_PIPES]; struct dsc_mst_fairness_vars vars[MAX_PIPES];
struct drm_dp_mst_topology_state *mst_state; struct drm_dp_mst_topology_state *mst_state;
...@@ -11071,6 +11099,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ...@@ -11071,6 +11099,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail; goto fail;
} }
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->mpo_requested)
DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
}
/* Check cursor planes scaling */ /* Check cursor planes scaling */
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = dm_check_crtc_cursor(state, crtc, new_crtc_state); ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
......
...@@ -626,6 +626,8 @@ struct dm_crtc_state { ...@@ -626,6 +626,8 @@ struct dm_crtc_state {
bool cm_has_degamma; bool cm_has_degamma;
bool cm_is_degamma_srgb; bool cm_is_degamma_srgb;
bool mpo_requested;
int update_type; int update_type;
int active_planes; int active_planes;
......
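Note (not part of the patch): the new mpo_requested flag in dm_crtc_state is set in dm_update_plane_state() whenever an overlay plane is attached to the CRTC, and amdgpu_dm_atomic_check() then prints one DRM_DEBUG_DRIVER line per CRTC that requested MPO, giving a cheap trace point for multi-plane-overlay enablement without changing commit behaviour.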
...@@ -119,6 +119,12 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, ...@@ -119,6 +119,12 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000); result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
if (result == VBIOSSMC_Result_Failed) {
ASSERT(0);
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
return -1;
}
if (IS_SMU_TIMEOUT(result)) { if (IS_SMU_TIMEOUT(result)) {
ASSERT(0); ASSERT(0);
dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000); dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
......
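Note: an explicit VBIOSSMC_Result_Failed reply is now handled separately from a timeout; the response register MP1_SMN_C2PMSG_91 is rewritten with VBIOSSMC_Result_OK, presumably so the next message does not observe the stale failure status, and -1 is returned to the caller instead of falling through to the IS_SMU_TIMEOUT() path.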
...@@ -3971,102 +3971,73 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) ...@@ -3971,102 +3971,73 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{ {
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct link_encoder *link_enc = NULL; struct link_encoder *link_enc = NULL;
#endif struct cp_psp_stream_config config = {0};
enum dp_panel_mode panel_mode =
dp_get_panel_mode(pipe_ctx->stream->link);
if (cp_psp && cp_psp->funcs.update_stream_config) { if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL)
struct cp_psp_stream_config config = {0}; return;
enum dp_panel_mode panel_mode =
dp_get_panel_mode(pipe_ctx->stream->link);
config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
/*stream_enc_inst*/ link_enc = pipe_ctx->stream->link->link_enc;
config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst; else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign)
#if defined(CONFIG_DRM_AMD_DC_DCN) link_enc = link_enc_cfg_get_link_enc_used_by_stream(
config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; pipe_ctx->stream->ctx->dc,
pipe_ctx->stream);
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY || ASSERT(link_enc);
pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { if (link_enc == NULL)
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) return;
link_enc = pipe_ctx->stream->link->link_enc;
else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
link_enc = link_enc_cfg_get_link_enc_used_by_stream(
pipe_ctx->stream->ctx->dc,
pipe_ctx->stream);
}
ASSERT(link_enc);
// Initialize PHY ID with ABCDE - 01234 mapping except when it is B0 /* otg instance */
config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
// Add flag to guard new A0 DIG mapping /* dig front end */
if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true && config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
pipe_ctx->stream->link->dc->ctx->dce_version == DCN_VERSION_3_1) {
config.dig_be = link_enc->preferred_engine;
config.dio_output_type = pipe_ctx->stream->link->ep_type;
config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
} else {
config.dio_output_type = 0;
config.dio_output_idx = 0;
}
// Add flag to guard B0 implementation /* stream encoder index */
if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true && config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { #if defined(CONFIG_DRM_AMD_DC_DCN)
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { if (is_dp_128b_132b_signal(pipe_ctx))
// enum ID 1-4 maps to DPIA PHY ID 0-3 config.stream_enc_idx =
config.phy_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1; pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
} else { // for non DPIA mode over B0, ABCDE maps to 01564 #endif
switch (link_enc->transmitter) {
case TRANSMITTER_UNIPHY_A:
config.phy_idx = 0;
break;
case TRANSMITTER_UNIPHY_B:
config.phy_idx = 1;
break;
case TRANSMITTER_UNIPHY_C:
config.phy_idx = 5;
break;
case TRANSMITTER_UNIPHY_D:
config.phy_idx = 6;
break;
case TRANSMITTER_UNIPHY_E:
config.phy_idx = 4;
break;
default:
config.phy_idx = 0;
break;
}
} /* dig back end */
} config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
} else if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
link_enc = link_enc_cfg_get_link_enc_used_by_stream(
pipe_ctx->stream->ctx->dc,
pipe_ctx->stream);
config.phy_idx = 0; /* Clear phy_idx for non-physical display endpoints. */
}
ASSERT(link_enc);
if (link_enc)
config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
if (is_dp_128b_132b_signal(pipe_ctx)) {
config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst; /* link encoder index */
config.dp2_enabled = 1; config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
} #if defined(CONFIG_DRM_AMD_DC_DCN)
if (is_dp_128b_132b_signal(pipe_ctx))
config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
#endif #endif
config.dpms_off = dpms_off; /* dio output index */
config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP);
config.mst_enabled = (pipe_ctx->stream->signal == /* phy index */
SIGNAL_TYPE_DISPLAY_PORT_MST); config.phy_idx = resource_transmitter_to_phy_idx(
cp_psp->funcs.update_stream_config(cp_psp->handle, &config); pipe_ctx->stream->link->dc, link_enc->transmitter);
} if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
/* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */
config.phy_idx = 0;
/* stream properties */
config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0;
config.mst_enabled = (pipe_ctx->stream->signal ==
SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0;
#endif
config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ?
1 : 0;
config.dpms_off = dpms_off;
/* dm stream context */
config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
} }
#endif #endif
......
...@@ -3270,3 +3270,36 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc, ...@@ -3270,3 +3270,36 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
i, disabled_master_pipe_idx); i, disabled_master_pipe_idx);
} }
} }
uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter)
{
/* TODO - get transmitter to phy idx mapping from DMUB */
uint8_t phy_idx = transmitter - TRANSMITTER_UNIPHY_A;
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc->ctx->dce_version == DCN_VERSION_3_1 &&
dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
switch (transmitter) {
case TRANSMITTER_UNIPHY_A:
phy_idx = 0;
break;
case TRANSMITTER_UNIPHY_B:
phy_idx = 1;
break;
case TRANSMITTER_UNIPHY_C:
phy_idx = 5;
break;
case TRANSMITTER_UNIPHY_D:
phy_idx = 6;
break;
case TRANSMITTER_UNIPHY_E:
phy_idx = 4;
break;
default:
phy_idx = 0;
break;
}
}
#endif
return phy_idx;
}
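Usage note: update_psp_stream_config() above now derives the PHY index through this helper instead of open-coding the remap, i.e. config.phy_idx = resource_transmitter_to_phy_idx(pipe_ctx->stream->link->dc, link_enc->transmitter); on Yellow Carp B0 the UNIPHY A/B/C/D/E transmitters map to PHY indices 0/1/5/6/4, while every other ASIC keeps the identity mapping.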
...@@ -1365,7 +1365,12 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) ...@@ -1365,7 +1365,12 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
uint32_t opp_id_src1 = OPP_ID_INVALID; uint32_t opp_id_src1 = OPP_ID_INVALID;
// Step 1: To find out which OPTC is running & OPTC DSC is ON // Step 1: To find out which OPTC is running & OPTC DSC is ON
for (i = 0; i < dc->res_pool->res_cap->num_timing_generator; i++) { // We can't use res_pool->res_cap->num_timing_generator to check
// because it records the default number of display pipes built into the driver,
// not the number of display pipes on the current chip.
// Some ASICs are fused to fewer display pipes than that default.
// The dcnxx_resource_construct function obtains the real count.
for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
uint32_t optc_dsc_state = 0; uint32_t optc_dsc_state = 0;
struct timing_generator *tg = dc->res_pool->timing_generators[i]; struct timing_generator *tg = dc->res_pool->timing_generators[i];
......
...@@ -100,6 +100,35 @@ static uint8_t phy_id_from_transmitter(enum transmitter t) ...@@ -100,6 +100,35 @@ static uint8_t phy_id_from_transmitter(enum transmitter t)
return phy_id; return phy_id;
} }
static bool has_query_dp_alt(struct link_encoder *enc)
{
struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
/* Supports development firmware and firmware >= 4.0.11 */
return dc_dmub_srv &&
!(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
dc_dmub_srv->dmub->fw_version <= DMUB_FW_VERSION(4, 0, 10));
}
static bool query_dp_alt_from_dmub(struct link_encoder *enc,
union dmub_rb_cmd *cmd)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
memset(cmd, 0, sizeof(*cmd));
cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS;
cmd->query_dp_alt.header.sub_type =
DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd))
return false;
return true;
}
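A minimal sketch of the version window used by has_query_dp_alt(), assuming a hypothetical FW_VERSION() packing macro (the real DMUB_FW_VERSION layout may differ): development firmware reports a version below 4.0.0, so only the released range 4.0.0 through 4.0.10 is excluded.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for DMUB_FW_VERSION(); layout assumed for illustration only */
#define FW_VERSION(maj, min, rev) ((uint32_t)((maj) << 24 | (min) << 16 | (rev)))

static bool fw_has_query_dp_alt(uint32_t fw_version)
{
	return !(fw_version >= FW_VERSION(4, 0, 0) &&
		 fw_version <= FW_VERSION(4, 0, 10));
}

int main(void)
{
	printf("%d %d %d\n",
	       fw_has_query_dp_alt(FW_VERSION(0, 0, 1)),   /* development: 1 */
	       fw_has_query_dp_alt(FW_VERSION(4, 0, 5)),   /* too old:     0 */
	       fw_has_query_dp_alt(FW_VERSION(4, 0, 11))); /* supported:   1 */
	return 0;
}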
void dcn31_link_encoder_set_dio_phy_mux( void dcn31_link_encoder_set_dio_phy_mux(
struct link_encoder *enc, struct link_encoder *enc,
enum encoder_type_select sel, enum encoder_type_select sel,
...@@ -569,45 +598,90 @@ void dcn31_link_encoder_disable_output( ...@@ -569,45 +598,90 @@ void dcn31_link_encoder_disable_output(
bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc) bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc)
{ {
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
union dmub_rb_cmd cmd; union dmub_rb_cmd cmd;
bool is_usb_c_alt_mode = false; uint32_t dp_alt_mode_disable;
if (enc->features.flags.bits.DP_IS_USB_C && dc_dmub_srv) { /* Only applicable to USB-C PHY. */
memset(&cmd, 0, sizeof(cmd)); if (!enc->features.flags.bits.DP_IS_USB_C)
cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS; return false;
cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.panel_cntl.data);
cmd.query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd)) /*
* Use the new interface from DMCUB if available.
* Avoids hanging the RDCPSPIPE if DMCUB wasn't already running.
*/
if (has_query_dp_alt(enc)) {
if (!query_dp_alt_from_dmub(enc, &cmd))
return false; return false;
is_usb_c_alt_mode = (cmd.query_dp_alt.data.is_dp_alt_disable == 0); return (cmd.query_dp_alt.data.is_dp_alt_disable == 0);
} }
return is_usb_c_alt_mode; /* Legacy path, avoid if possible. */
if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
&dp_alt_mode_disable);
} else {
/*
* B0 phys use a new set of registers to check whether alt mode is disabled.
* if value == 1 alt mode is disabled, otherwise it is enabled.
*/
if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) ||
(enc10->base.transmitter == TRANSMITTER_UNIPHY_B) ||
(enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
&dp_alt_mode_disable);
} else {
REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
&dp_alt_mode_disable);
}
}
return (dp_alt_mode_disable == 0);
} }
void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings) void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings)
{ {
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
union dmub_rb_cmd cmd; union dmub_rb_cmd cmd;
uint32_t is_in_usb_c_dp4_mode = 0;
dcn10_link_encoder_get_max_link_cap(enc, link_settings); dcn10_link_encoder_get_max_link_cap(enc, link_settings);
if (enc->features.flags.bits.DP_IS_USB_C && dc_dmub_srv) { /* Take the link cap directly if not USB */
memset(&cmd, 0, sizeof(cmd)); if (!enc->features.flags.bits.DP_IS_USB_C)
cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS; return;
cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.panel_cntl.data);
cmd.query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd)) /*
* Use the new interface from DMCUB if available.
* Avoids hanging the RDCPSPIPE if DMCUB wasn't already running.
*/
if (has_query_dp_alt(enc)) {
if (!query_dp_alt_from_dmub(enc, &cmd))
return; return;
if (cmd.query_dp_alt.data.is_usb && cmd.query_dp_alt.data.is_dp4 == 0) if (cmd.query_dp_alt.data.is_usb &&
cmd.query_dp_alt.data.is_dp4 == 0)
link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
return;
} }
/* Legacy path, avoid if possible. */
if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
&is_in_usb_c_dp4_mode);
} else {
if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) ||
(enc10->base.transmitter == TRANSMITTER_UNIPHY_B) ||
(enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
&is_in_usb_c_dp4_mode);
} else {
REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
&is_in_usb_c_dp4_mode);
}
}
if (!is_in_usb_c_dp4_mode)
link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
} }
...@@ -1984,7 +1984,7 @@ static void dcn31_calculate_wm_and_dlg_fp( ...@@ -1984,7 +1984,7 @@ static void dcn31_calculate_wm_and_dlg_fp(
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
if (dc->config.forced_clocks) { if (dc->config.forced_clocks || dc->debug.max_disp_clk) {
pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
} }
......
...@@ -34,12 +34,12 @@ struct cp_psp_stream_config { ...@@ -34,12 +34,12 @@ struct cp_psp_stream_config {
uint8_t dig_fe; uint8_t dig_fe;
uint8_t link_enc_idx; uint8_t link_enc_idx;
uint8_t stream_enc_idx; uint8_t stream_enc_idx;
uint8_t phy_idx;
uint8_t dio_output_idx; uint8_t dio_output_idx;
uint8_t dio_output_type; uint8_t phy_idx;
uint8_t assr_enabled; uint8_t assr_enabled;
uint8_t mst_enabled; uint8_t mst_enabled;
uint8_t dp2_enabled; uint8_t dp2_enabled;
uint8_t usb4_enabled;
void *dm_stream_ctx; void *dm_stream_ctx;
bool dpms_off; bool dpms_off;
}; };
......
...@@ -218,5 +218,6 @@ void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, ...@@ -218,5 +218,6 @@ void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc, void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
struct dc_state *context, struct dc_state *context,
uint8_t disabled_master_pipe_idx); uint8_t disabled_master_pipe_idx);
uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
...@@ -104,6 +104,7 @@ struct mod_hdcp_displayport { ...@@ -104,6 +104,7 @@ struct mod_hdcp_displayport {
uint8_t rev; uint8_t rev;
uint8_t assr_enabled; uint8_t assr_enabled;
uint8_t mst_enabled; uint8_t mst_enabled;
uint8_t usb4_enabled;
}; };
struct mod_hdcp_hdmi { struct mod_hdcp_hdmi {
...@@ -249,7 +250,6 @@ struct mod_hdcp_link { ...@@ -249,7 +250,6 @@ struct mod_hdcp_link {
uint8_t ddc_line; uint8_t ddc_line;
uint8_t link_enc_idx; uint8_t link_enc_idx;
uint8_t phy_idx; uint8_t phy_idx;
uint8_t dio_output_type;
uint8_t dio_output_id; uint8_t dio_output_id;
uint8_t hdcp_supported_informational; uint8_t hdcp_supported_informational;
union { union {
......
...@@ -1405,8 +1405,14 @@ static int smu_disable_dpms(struct smu_context *smu) ...@@ -1405,8 +1405,14 @@ static int smu_disable_dpms(struct smu_context *smu)
{ {
struct amdgpu_device *adev = smu->adev; struct amdgpu_device *adev = smu->adev;
int ret = 0; int ret = 0;
/*
* TODO: (adev->in_suspend && !adev->in_s0ix) is added to pair
* the workaround which always reset the asic in suspend.
* It's likely that workaround will be dropped in the future.
* Then the change here should be dropped together.
*/
bool use_baco = !smu->is_apu && bool use_baco = !smu->is_apu &&
((amdgpu_in_reset(adev) && (((amdgpu_in_reset(adev) || (adev->in_suspend && !adev->in_s0ix)) &&
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev))); ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
......
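Note: with this change BACO is also selected when the device is suspending outside of s0ix, pairing with the existing workaround that always resets the ASIC in suspend; as the in-code TODO says, both are expected to be dropped together once that workaround goes away.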
...@@ -1625,10 +1625,18 @@ static int aldebaran_set_df_cstate(struct smu_context *smu, ...@@ -1625,10 +1625,18 @@ static int aldebaran_set_df_cstate(struct smu_context *smu,
static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en) static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
{ {
return smu_cmn_send_smc_msg_with_param(smu, struct amdgpu_device *adev = smu->adev;
SMU_MSG_GmiPwrDnControl,
en ? 0 : 1, /* The message only works on master die and NACK will be sent
NULL); back for other dies, only send it on master die */
if (!adev->smuio.funcs->get_socket_id(adev) &&
!adev->smuio.funcs->get_die_id(adev))
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GmiPwrDnControl,
en ? 0 : 1,
NULL);
else
return 0;
} }
static const struct throttling_logging_label { static const struct throttling_logging_label {
......
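Note: GmiPwrDnControl is only accepted by the master die, so the guard checks that both the socket ID and the die ID reported by the SMUIO block are zero before sending the message; secondary dies return success instead of provoking a NACK from the SMU.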
/* SPDX-License-Identifier: GPL-2.0 OR MIT WITH Linux-syscall-note */ /* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/* /*
* Copyright 2021 Advanced Micro Devices, Inc. * Copyright 2021 Advanced Micro Devices, Inc.
* *
......