Commit d2ae842d authored by Alex Deucher

drm/amdgpu/pm: bail on sysfs/debugfs queries during platform suspend

The GPU is in the process of being shut down.  Spurious queries during
suspend and resume can put the SMU into a bad state.  Runtime PM is
handled dynamically, so we check whether we are in a non-runtime suspend.
Reviewed-by: Evan Quan <evan.quan@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent dd67d7a6
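The change applies the same two-line guard to every power-management sysfs and debugfs handler, right after the existing GPU-reset check. Below is a minimal sketch of what one handler's prologue looks like with the guard in place; the function and attribute name are illustrative and not part of this patch, and the body is abbreviated.

static ssize_t amdgpu_get_example_attr(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t ret;

	/* The device is mid-reset; the SMU cannot be queried safely. */
	if (amdgpu_in_reset(adev))
		return -EPERM;
	/*
	 * New guard: in_suspend is set whenever the device is being
	 * suspended, while in_runpm is only set for runtime PM transitions.
	 * Runtime PM is handled dynamically by pm_runtime_get_sync() below,
	 * so only bail for a non-runtime (platform) suspend.
	 */
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	/* Wake the GPU (or bump its usage count) before talking to the SMU. */
	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	/* ... query the SMU and format the result into buf, setting ret ... */

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return ret;
}

The diff below repeats exactly this guard in each of the affected handlers.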
@@ -128,6 +128,8 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -161,6 +163,8 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	if (strncmp("battery", buf, strlen("battery")) == 0)
 		state = POWER_STATE_TYPE_BATTERY;
@@ -267,6 +271,8 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -309,6 +315,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	if (strncmp("low", buf, strlen("low")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -407,6 +415,8 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -447,6 +457,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -483,6 +495,8 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	if (adev->pp_force_state_enabled)
 		return amdgpu_get_pp_cur_state(dev, attr, buf);
@@ -503,6 +517,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	if (strlen(buf) == 1)
 		adev->pp_force_state_enabled = false;
@@ -563,6 +579,8 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -601,6 +619,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -763,6 +783,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	if (count > 127)
 		return -EINVAL;
@@ -864,6 +886,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -915,6 +939,8 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = kstrtou64(buf, 0, &featuremask);
 	if (ret)
@@ -951,6 +977,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -1010,6 +1038,8 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -1075,6 +1105,8 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = amdgpu_read_mask(buf, count, &mask);
 	if (ret)
@@ -1231,6 +1263,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -1261,6 +1295,8 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = kstrtol(buf, 0, &value);
@@ -1304,6 +1340,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -1334,6 +1372,8 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = kstrtol(buf, 0, &value);
@@ -1397,6 +1437,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -1435,6 +1477,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	tmp[0] = *(buf);
 	tmp[1] = '\0';
@@ -1498,6 +1542,8 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	r = pm_runtime_get_sync(ddev->dev);
 	if (r < 0) {
@@ -1536,6 +1582,8 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	r = pm_runtime_get_sync(ddev->dev);
 	if (r < 0) {
@@ -1579,6 +1627,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	if (adev->flags & AMD_IS_APU)
 		return -ENODATA;
@@ -1620,6 +1670,8 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	if (adev->unique_id)
 		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
@@ -1718,6 +1770,8 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(ddev->dev);
 	if (ret < 0) {
@@ -1946,6 +2000,8 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	if (channel >= PP_TEMP_MAX)
 		return -EINVAL;
@@ -2082,6 +2138,8 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (ret < 0) {
@@ -2114,6 +2172,8 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	err = kstrtoint(buf, 10, &value);
 	if (err)
@@ -2164,6 +2224,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
@@ -2212,6 +2274,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
@@ -2245,6 +2309,8 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
@@ -2277,6 +2343,8 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2307,6 +2375,8 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2336,6 +2406,8 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
@@ -2368,6 +2440,8 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (err < 0) {
@@ -2414,6 +2488,8 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (ret < 0) {
@@ -2447,6 +2523,8 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	err = kstrtoint(buf, 10, &value);
 	if (err)
@@ -2488,6 +2566,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2525,6 +2605,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	/* only APUs have vddnb */
 	if (!(adev->flags & AMD_IS_APU))
@@ -2567,6 +2649,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2611,6 +2695,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2648,6 +2734,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2685,6 +2773,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2731,6 +2821,8 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	if (amdgpu_sriov_vf(adev))
 		return -EINVAL;
@@ -2772,6 +2864,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -2809,6 +2903,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
 	if (r < 0) {
@@ -3382,6 +3478,8 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
 	if (amdgpu_in_reset(adev))
 		return -EPERM;
+	if (adev->in_suspend && !adev->in_runpm)
+		return -EPERM;
 	r = pm_runtime_get_sync(dev->dev);
 	if (r < 0) {
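User-visible effect: while the GPU is in a platform suspend or resume transition, reads and writes of these power-management attributes fail with EPERM instead of waking a half-shutdown SMU. A small, hypothetical user-space check follows; the card0 sysfs path is an assumption for illustration and is not part of this patch.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; adjust cardN for the GPU being queried. */
	const char *path = "/sys/class/drm/card0/device/power_dpm_state";
	char buf[64];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	n = read(fd, buf, sizeof(buf) - 1);
	if (n < 0) {
		/* During platform suspend/resume the handler now returns -EPERM. */
		fprintf(stderr, "read: %s\n", strerror(errno));
	} else {
		buf[n] = '\0';
		printf("power_dpm_state: %s", buf);
	}

	close(fd);
	return 0;
}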