Commit 5c9669ce authored by Tom O'Rourke, committed by Daniel Vetter

drm/i915: Finish enabling rps before use by sysfs or debugfs

Enabling rps (turbo setup) was put in a work queue because it may
take quite a while.  This change flushes the work queue to initialize
rps values before use by sysfs or debugfs.  Specifically,
rps.delayed_resume_work is flushed before using rps.hw_max,
rps.max_delay, rps.min_delay, or rps.cur_delay.

This change fixes a problem in sysfs where show functions using
uninitialized values show incorrect values and store functions
using uninitialized values in range checks incorrectly fail to
store valid input values.  This change also addresses similar
use-before-initialization problems in debugfs.

Signed-off-by: Tom O'Rourke <Tom.O'Rourke@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent 09e14bf3
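
For context, the pattern applied in every affected sysfs/debugfs entry point is sketched below. This is an illustrative, simplified C fragment and not code from the patch itself: the show_cur_freq_sketch() wrapper, its seq_file output, and the GT_FREQUENCY_MULTIPLIER conversion are assumptions for illustration, while flush_delayed_work(), the rps fields, and the locking mirror the hunks that follow.

/*
 * Illustrative sketch only (not part of this patch): a debugfs-style
 * read path that flushes the deferred RPS setup before touching the
 * cached frequency limits, so the values it prints are initialized.
 */
static int show_cur_freq_sketch(struct seq_file *m,
                                struct drm_i915_private *dev_priv)
{
        int ret;

        /* Wait for the deferred turbo setup (rps.delayed_resume_work)
         * so rps.cur_delay/min_delay/max_delay/hw_max hold real values. */
        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;

        /* Conversion factor assumed for illustration: the stored delay
         * value scales to MHz for display. */
        seq_printf(m, "current GPU freq: %d MHz\n",
                   dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER);

        mutex_unlock(&dev_priv->rps.hw_lock);
        return 0;
}
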
@@ -847,6 +847,8 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	if (IS_GEN5(dev)) {
 		u16 rgvswctl = I915_READ16(MEMSWCTL);
 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1325,6 +1327,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 		return 0;
 	}
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -1940,6 +1944,8 @@ i915_max_freq_get(void *data, u64 *val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -1964,6 +1970,8 @@ i915_max_freq_set(void *data, u64 val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2002,6 +2010,8 @@ i915_min_freq_get(void *data, u64 *val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -2026,6 +2036,8 @@ i915_min_freq_set(void *data, u64 val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
......
@@ -251,6 +251,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
 		u32 freq;
@@ -283,6 +285,8 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
@@ -307,6 +311,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	if (ret)
 		return ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -355,6 +361,8 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (IS_VALLEYVIEW(dev_priv->dev))
 		ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
@@ -379,6 +387,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	if (ret)
 		return ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	if (IS_VALLEYVIEW(dev)) {
......