Commit 66429300 authored by Alex Deucher

drm/amdgpu/pm: fix ref count leak when pm_runtime_get_sync fails

The call to pm_runtime_get_sync() increments the usage counter even when
it fails, leaving the reference count unbalanced.
In case of failure, decrement the ref count before returning.
Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent 9eee152a
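The same two-line change recurs in every handler touched below: pm_runtime_get_sync() takes a runtime-PM reference even when it fails, so the error path must drop that reference before returning. A minimal, self-contained sketch of the idiom, assuming a hypothetical helper around a plain struct device pointer (the helper name is illustrative, not part of the patch):

#include <linux/pm_runtime.h>

/* Illustrative only: the error-handling pattern applied throughout this patch. */
static int example_resume_device(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/*
		 * pm_runtime_get_sync() bumps the usage counter even on
		 * failure, so drop the reference before bailing out.
		 */
		pm_runtime_put_autosuspend(dev);
		return ret;
	}

	return 0;
}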
@@ -167,8 +167,10 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		if (adev->smu.ppt_funcs->get_current_power_state)
@@ -212,8 +214,10 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
 		return -EINVAL;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		mutex_lock(&adev->pm.mutex);
@@ -307,8 +311,10 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		level = smu_get_performance_level(&adev->smu);
@@ -369,8 +375,10 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
 	}
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		current_level = smu_get_performance_level(&adev->smu);
@@ -449,8 +457,10 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		ret = smu_get_power_num_states(&adev->smu, &data);
@@ -491,8 +501,10 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		pm = smu_get_current_power_state(smu);
@@ -567,8 +579,10 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 		state = data.states[idx];
 
 		ret = pm_runtime_get_sync(ddev->dev);
-		if (ret < 0)
+		if (ret < 0) {
+			pm_runtime_put_autosuspend(ddev->dev);
 			return ret;
+		}
 
 		/* only set user selected power states */
 		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
@@ -608,8 +622,10 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
@@ -650,8 +666,10 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
@@ -790,8 +808,10 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 	}
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		ret = smu_od_edit_dpm_table(&adev->smu, type,
@@ -847,8 +867,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
@@ -905,8 +927,10 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
 	pr_debug("featuremask = 0x%llx\n", featuremask);
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
@@ -942,8 +966,10 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
@@ -1001,8 +1027,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
@@ -1071,8 +1099,10 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 		return ret;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
@@ -1101,8 +1131,10 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
@@ -1135,8 +1167,10 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 		return ret;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
@@ -1165,8 +1199,10 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
@@ -1199,8 +1235,10 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
 		return ret;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
@@ -1231,8 +1269,10 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
@@ -1265,8 +1305,10 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
 		return ret;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
@@ -1297,8 +1339,10 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
@@ -1331,8 +1375,10 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
 		return ret;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
@@ -1363,8 +1409,10 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
@@ -1397,8 +1445,10 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 		return ret;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
@@ -1429,8 +1479,10 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
@@ -1462,8 +1514,10 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 		return -EINVAL;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
@@ -1498,8 +1552,10 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
@@ -1531,8 +1587,10 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 		return -EINVAL;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
@@ -1587,8 +1645,10 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		size = smu_get_power_profile_mode(&adev->smu, buf);
@@ -1650,8 +1710,10 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 	parameter[parameter_size] = profile_mode;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev))
 		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
@@ -1687,8 +1749,10 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
 		return -EPERM;
 
 	r = pm_runtime_get_sync(ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return r;
+	}
 
 	/* read the IP busy sensor */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
@@ -1723,8 +1787,10 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
 		return -EPERM;
 
 	r = pm_runtime_get_sync(ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return r;
+	}
 
 	/* read the IP busy sensor */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
@@ -1770,8 +1836,10 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 		return -ENODATA;
 
 	ret = pm_runtime_get_sync(ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(ddev->dev);
 		return ret;
+	}
 
 	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
@@ -2073,8 +2141,10 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	switch (channel) {
 	case PP_TEMP_JUNCTION:
@@ -2204,8 +2274,10 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(adev->ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		pwm_mode = smu_get_fan_control_mode(&adev->smu);
@@ -2242,8 +2314,10 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 		return err;
 
 	ret = pm_runtime_get_sync(adev->ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		smu_set_fan_control_mode(&adev->smu, value);
@@ -2290,8 +2364,10 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 		return -EPERM;
 
 	err = pm_runtime_get_sync(adev->ddev->dev);
-	if (err < 0)
+	if (err < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	if (is_support_sw_smu(adev))
 		pwm_mode = smu_get_fan_control_mode(&adev->smu);
@@ -2342,8 +2418,10 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 		return -EPERM;
 
 	err = pm_runtime_get_sync(adev->ddev->dev);
-	if (err < 0)
+	if (err < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	if (is_support_sw_smu(adev))
 		err = smu_get_fan_speed_percent(&adev->smu, &speed);
@@ -2375,8 +2453,10 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 		return -EPERM;
 
 	err = pm_runtime_get_sync(adev->ddev->dev);
-	if (err < 0)
+	if (err < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	if (is_support_sw_smu(adev))
 		err = smu_get_fan_speed_rpm(&adev->smu, &speed);
@@ -2407,8 +2487,10 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
 		return -EPERM;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
 				   (void *)&min_rpm, &size);
@@ -2435,8 +2517,10 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
 		return -EPERM;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
 				   (void *)&max_rpm, &size);
@@ -2462,8 +2546,10 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 		return -EPERM;
 
 	err = pm_runtime_get_sync(adev->ddev->dev);
-	if (err < 0)
+	if (err < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	if (is_support_sw_smu(adev))
 		err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
@@ -2494,8 +2580,10 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
 		return -EPERM;
 
 	err = pm_runtime_get_sync(adev->ddev->dev);
-	if (err < 0)
+	if (err < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	if (is_support_sw_smu(adev))
 		pwm_mode = smu_get_fan_control_mode(&adev->smu);
@@ -2543,8 +2631,10 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
 		return -EPERM;
 
 	ret = pm_runtime_get_sync(adev->ddev->dev);
-	if (ret < 0)
+	if (ret < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return ret;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		pwm_mode = smu_get_fan_control_mode(&adev->smu);
@@ -2589,8 +2679,10 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 		return -EINVAL;
 
 	err = pm_runtime_get_sync(adev->ddev->dev);
-	if (err < 0)
+	if (err < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		smu_set_fan_control_mode(&adev->smu, pwm_mode);
@@ -2621,8 +2713,10 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
 		return -EPERM;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	/* get the voltage */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
@@ -2660,8 +2754,10 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
 		return -EINVAL;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	/* get the voltage */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
@@ -2696,8 +2792,10 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
 		return -EPERM;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	/* get the voltage */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
@@ -2735,8 +2833,10 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 		return -EPERM;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		smu_get_power_limit(&adev->smu, &limit, true);
@@ -2767,8 +2867,10 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 		return -EPERM;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	if (is_support_sw_smu(adev)) {
 		smu_get_power_limit(&adev->smu, &limit, false);
@@ -2810,8 +2912,10 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 
 	err = pm_runtime_get_sync(adev->ddev->dev);
-	if (err < 0)
+	if (err < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return err;
+	}
 
 	if (is_support_sw_smu(adev))
 		err = smu_set_power_limit(&adev->smu, value);
@@ -2841,8 +2945,10 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
 		return -EPERM;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	/* get the sclk */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
@@ -2876,8 +2982,10 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
 		return -EPERM;
 
 	r = pm_runtime_get_sync(adev->ddev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(adev->ddev->dev);
 		return r;
+	}
 
 	/* get the sclk */
 	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
@@ -3739,8 +3847,10 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 		return -EPERM;
 
 	r = pm_runtime_get_sync(dev->dev);
-	if (r < 0)
+	if (r < 0) {
+		pm_runtime_put_autosuspend(dev->dev);
 		return r;
+	}
 
 	amdgpu_device_ip_get_clockgating_state(adev, &flags);
 	seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);