From 38748ad88a2f9673bb63dda607204bb3a9bc21a0 Mon Sep 17 00:00:00 2001
From: Monk Liu
Date: Wed, 22 Apr 2020 19:28:48 +0800
Subject: [PATCH] drm/amdgpu: enable one vf mode for nv12

Under SR-IOV, only the one-VF mode is allowed to program the SMU, so
bail out of smu_hw_init() early for multi-VF SR-IOV before the SMC
engine is touched, access the SMU message registers through the NO_KIQ
helpers, and skip the power-limit/OD/deep-sleep/display/PCIe setup that
the host controls when running as a VF. Also free and re-allocate the
pptable in smu_v11_0_parse_pptable() so the SMU can be re-initialized
during TDR.

Signed-off-by: Monk Liu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 12 +++---
 drivers/gpu/drm/amd/powerplay/navi10_ppt.c |  6 ++-
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c  | 49 ++++++++++++++++++++----
 3 files changed, 52 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 361a5b6cbe3b..5964d6323a13 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -347,13 +347,13 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
 	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
 
 	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
-					  param, &param);
+					  param, value);
 	if (ret)
 		return ret;
 
 	/* BIT31:  0 - Fine grained DPM, 1 - Dicrete DPM
 	 * now, we un-support it */
-	*value = param & 0x7fffffff;
+	*value = *value & 0x7fffffff;
 
 	return ret;
 }
@@ -535,7 +535,6 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
 	int table_id = smu_table_get_index(smu, table_index);
 	uint32_t table_size;
 	int ret = 0;
-
 	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
 		return -EINVAL;
 
@@ -691,7 +690,6 @@ int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
 
 	if (smu->is_apu)
 		return 1;
-
 	feature_id = smu_feature_get_index(smu, mask);
 	if (feature_id < 0)
 		return 0;
@@ -1339,6 +1337,9 @@ static int smu_hw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct smu_context *smu = &adev->smu;
 
+	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+		return 0;
+
 	ret = smu_start_smc_engine(smu);
 	if (ret) {
 		pr_err("SMU is not ready yet!\n");
@@ -1352,9 +1353,6 @@ static int smu_hw_init(void *handle)
 		smu_set_gfx_cgpg(&adev->smu, true);
 	}
 
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-		return 0;
-
 	if (!smu->pm_enabled)
 		return 0;
 
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index c94270f7c198..2184d247a9f7 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -1817,7 +1817,8 @@ static int navi10_get_power_limit(struct smu_context *smu,
 	int power_src;
 
 	if (!smu->power_limit) {
-		if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+		if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT) &&
+		    !amdgpu_sriov_vf(smu->adev)) {
 			power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
 			if (power_src < 0)
 				return -EINVAL;
@@ -1960,6 +1961,9 @@ static int navi10_set_default_od_settings(struct smu_context *smu, bool initiali
 	OverDriveTable_t *od_table, *boot_od_table;
 	int ret = 0;
 
+	if (amdgpu_sriov_vf(smu->adev))
+		return 0;
+
 	ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index a97b2964ca7c..3e1b3ed8a05e 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -57,7 +57,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
 					      uint16_t msg)
 {
 	struct amdgpu_device *adev = smu->adev;
-	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
 	return 0;
 }
 
@@ -65,7 +65,7 @@ static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
 {
 	struct amdgpu_device *adev = smu->adev;
 
-	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+	*arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
 	return 0;
 }
 
@@ -75,7 +75,7 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
 	uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
 
 	for (i = 0; i < timeout; i++) {
-		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+		cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
 		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
 			return cur_value == 0x1 ? 0 : -EIO;
 
@@ -83,7 +83,10 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
 	}
 
 	/* timeout means wrong logic */
-	return -ETIME;
+	if (i == timeout)
+		return -ETIME;
+
+	return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
 }
 
 int
@@ -107,9 +110,9 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
 		goto out;
 	}
 
-	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
 
-	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+	WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
 
 	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
 
@@ -119,6 +122,7 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
 			  smu_get_message_name(smu, msg), index, param, ret);
 		goto out;
 	}
+
 	if (read_arg) {
 		ret = smu_v11_0_read_arg(smu, read_arg);
 		if (ret) {
@@ -728,8 +732,9 @@ int smu_v11_0_parse_pptable(struct smu_context *smu)
 	struct smu_table_context *table_context = &smu->smu_table;
 	struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
 
+	/* during TDR we need to free and alloc the pptable */
 	if (table_context->driver_pptable)
-		return -EINVAL;
+		kfree(table_context->driver_pptable);
 
 	table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
 
@@ -769,6 +774,9 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
 {
 	int ret;
 
+	if (amdgpu_sriov_vf(smu->adev))
+		return 0;
+
 	ret = smu_send_smc_msg_with_param(smu,
 					  SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
 	if (ret)
@@ -812,6 +820,9 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
 	int ret = 0;
 	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
 
+	if (amdgpu_sriov_vf(smu->adev))
+		return 0;
+
 	if (tool_table->mc_address) {
 		ret = smu_send_smc_msg_with_param(smu,
 				SMU_MSG_SetToolsDramAddrHigh,
@@ -831,6 +842,12 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
 {
 	int ret = 0;
 
+	if (amdgpu_sriov_vf(smu->adev))
+		return 0;
+
+	if (!smu->pm_enabled)
+		return ret;
+
 	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
 	return ret;
 }
@@ -842,6 +859,9 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
 	int ret = 0;
 	uint32_t feature_mask[2];
 
+	if (amdgpu_sriov_vf(smu->adev))
+		return 0;
+
 	mutex_lock(&feature->mutex);
 	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
 		goto failed;
@@ -870,6 +890,9 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
 	struct smu_feature *feature = &smu->smu_feature;
 	int ret = 0;
 
+	if (amdgpu_sriov_vf(smu->adev) && !amdgpu_sriov_is_pp_one_vf(smu->adev))
+		return 0;
+
 	if (!feature_mask || num < 2)
 		return -EINVAL;
 
@@ -925,6 +948,12 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
 {
 	int ret = 0;
 
+	if (amdgpu_sriov_vf(smu->adev))
+		return 0;
+
+	if (!smu->pm_enabled)
+		return ret;
+
 	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
 	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
 		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
@@ -1084,6 +1113,9 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
 	int ret = 0;
 	uint32_t max_power_limit;
 
+	if (amdgpu_sriov_vf(smu->adev))
+		return 0;
+
 	max_power_limit = smu_v11_0_get_max_power_limit(smu);
 
 	if (n > max_power_limit) {
@@ -1809,6 +1841,9 @@ int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
 	uint32_t pcie_gen = 0, pcie_width = 0;
 	int ret;
 
+	if (amdgpu_sriov_vf(smu->adev))
+		return 0;
+
 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
 		pcie_gen = 3;
 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-- 
GitLab