Commit 91d14251 authored by Tvrtko Ursulin

drm/i915: Small display interrupt handlers tidy

I have noticed some of our interrupt handlers use both dev and
dev_priv while they could get away with only dev_priv in the
huge majority of cases.

Tidying that up had a cascading effect on function
prototypes, so the churn factor is relatively big, but I
think it is for the better.

For example, even where changes cascade out of i915_irq.c,
for functions prefixed with intel_, genX_ or <plat>_, it
makes more sense to take dev_priv directly anyway.

This allows us to eliminate local variables and intermixed usage
of dev and dev_priv where only one is good enough.
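
To illustrate, a minimal before/after sketch of the pattern (the
handler name is hypothetical, not lifted verbatim from the diff):

/* Before: take struct drm_device and re-derive dev_priv locally. */
static void example_display_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
}

/* After: take dev_priv directly; no local dev is needed at all. */
static void example_display_irq_handler(struct drm_i915_private *dev_priv)
{
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
}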

End result is shrinkage of both source and the resulting binary.

i915.ko:

 - .text         000b0899
 + .text         000b0619
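
That is a .text reduction of 0x000b0899 - 0x000b0619 = 0x280, i.e. 640 bytes.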

Or if we look at the Gen8 display irq chain:

 -00000000000006ad t gen8_irq_handler
 +0000000000000663 t gen8_irq_handler
   -0000000000000028 T intel_opregion_asle_intr
   +0000000000000024 T intel_opregion_asle_intr
   -000000000000008c t ilk_hpd_irq_handler
   +000000000000007f t ilk_hpd_irq_handler
   -0000000000000116 T intel_check_page_flip
   +0000000000000112 T intel_check_page_flip
   -000000000000011a T intel_prepare_page_flip
   +0000000000000119 T intel_prepare_page_flip
   -0000000000000014 T intel_finish_page_flip_plane
   +0000000000000013 T intel_finish_page_flip_plane
   -0000000000000053 t hsw_pipe_crc_irq_handler
   +000000000000004c t hsw_pipe_crc_irq_handler
   -000000000000022e t cpt_irq_handler
   +0000000000000213 t cpt_irq_handler
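
Summing the deltas above, the Gen8 display irq chain shrinks by about 131 bytes.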

So it is a small shrinkage, but it is all fast paths, so it does no harm.

The situation is similar in other interrupt handlers as well.

v2: Tidy intel_queue_rps_boost_for_request as well. (Chris Wilson)
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Parent d6199256
......@@ -768,7 +768,7 @@ static int i915_drm_resume(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
-dev_priv->display.hpd_irq_setup(dev);
+dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_dp_mst_resume(dev);
......
......@@ -612,7 +612,7 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req,
uint32_t flags);
-void (*hpd_irq_setup)(struct drm_device *dev);
+void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
......@@ -2788,7 +2788,8 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
/* intel_hotplug.c */
-void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
+u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
......@@ -3493,7 +3494,7 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
-extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
......@@ -3503,7 +3504,9 @@ extern int intel_opregion_get_panel_type(struct drm_device *dev);
static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
-static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
......@@ -3539,7 +3542,7 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
-extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
......
(This file's diff has been collapsed.)
......@@ -3111,16 +3111,16 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -ENODEV;
}
-static void intel_complete_page_flips(struct drm_device *dev)
+static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
{
struct drm_crtc *crtc;
-for_each_crtc(dev, crtc) {
+for_each_crtc(dev_priv->dev, crtc) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum plane plane = intel_crtc->plane;
-intel_prepare_page_flip(dev, plane);
-intel_finish_page_flip_plane(dev, plane);
+intel_prepare_page_flip(dev_priv, plane);
+intel_finish_page_flip_plane(dev_priv, plane);
}
}
......@@ -3171,7 +3171,7 @@ void intel_finish_reset(struct drm_device *dev)
* so complete all pending flips so that user space
* will get its events and not get stuck.
*/
-intel_complete_page_flips(dev);
+intel_complete_page_flips(dev_priv);
/* no reset support for gen2 */
if (IS_GEN2(dev))
......@@ -3203,7 +3203,7 @@ void intel_finish_reset(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
-dev_priv->display.hpd_irq_setup(dev);
+dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_display_resume(dev);
......@@ -10874,9 +10874,10 @@ static void intel_unpin_work_fn(struct work_struct *__work)
kfree(work);
}
-static void do_intel_finish_page_flip(struct drm_device *dev,
+static void do_intel_finish_page_flip(struct drm_i915_private *dev_priv,
struct drm_crtc *crtc)
{
+struct drm_device *dev = dev_priv->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;
......@@ -10905,20 +10906,18 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-void intel_finish_page_flip(struct drm_device *dev, int pipe)
+void intel_finish_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
-struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-do_intel_finish_page_flip(dev, crtc);
+do_intel_finish_page_flip(dev_priv, crtc);
}
-void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
+void intel_finish_page_flip_plane(struct drm_i915_private *dev_priv, int plane)
{
-struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
-do_intel_finish_page_flip(dev, crtc);
+do_intel_finish_page_flip(dev_priv, crtc);
}
/* Is 'a' after or equal to 'b'? */
......@@ -10974,9 +10973,9 @@ static bool page_flip_finished(struct intel_crtc *crtc)
crtc->unpin_work->flip_count);
}
-void intel_prepare_page_flip(struct drm_device *dev, int plane)
+void intel_prepare_page_flip(struct drm_i915_private *dev_priv, int plane)
{
-struct drm_i915_private *dev_priv = dev->dev_private;
+struct drm_device *dev = dev_priv->dev;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
......@@ -11476,9 +11475,9 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
return addr == work->gtt_offset;
}
-void intel_check_page_flip(struct drm_device *dev, int pipe)
+void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
-struct drm_i915_private *dev_priv = dev->dev_private;
+struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
......@@ -11498,7 +11497,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
}
if (work != NULL &&
drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
-intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
+intel_queue_rps_boost_for_request(work->flip_queued_req);
spin_unlock(&dev->event_lock);
}
......
......@@ -1033,8 +1033,8 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_device *dev);
-void gen6_enable_rps_interrupts(struct drm_device *dev);
-void gen6_disable_rps_interrupts(struct drm_device *dev);
+void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
......@@ -1171,10 +1171,10 @@ struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
-void intel_prepare_page_flip(struct drm_device *dev, int plane);
-void intel_finish_page_flip(struct drm_device *dev, int pipe);
-void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
-void intel_check_page_flip(struct drm_device *dev, int pipe);
+void intel_prepare_page_flip(struct drm_i915_private *dev_priv, int plane);
+void intel_finish_page_flip(struct drm_i915_private *dev_priv, int pipe);
+void intel_finish_page_flip_plane(struct drm_i915_private *dev_priv, int plane);
+void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
int intel_prepare_plane_fb(struct drm_plane *plane,
const struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
......@@ -1625,8 +1625,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv,
struct intel_rps_client *rps,
unsigned long submitted);
-void intel_queue_rps_boost_for_request(struct drm_device *dev,
-struct drm_i915_gem_request *req);
+void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
......
......@@ -220,7 +220,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
}
}
if (dev_priv->display.hpd_irq_setup)
-dev_priv->display.hpd_irq_setup(dev);
+dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
intel_runtime_pm_put(dev_priv);
......@@ -346,7 +346,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
/**
* intel_hpd_irq_handler - main hotplug irq handler
-* @dev: drm device
+* @dev_priv: drm_i915_private
* @pin_mask: a mask of hpd pins that have triggered the irq
* @long_mask: a mask of hpd pins that may be long hpd pulses
*
......@@ -360,10 +360,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
* Here, we do hotplug irq storm detection and mitigation, and pass further
* processing to appropriate bottom halves.
*/
-void intel_hpd_irq_handler(struct drm_device *dev,
+void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask)
{
-struct drm_i915_private *dev_priv = dev->dev_private;
int i;
enum port port;
bool storm_detected = false;
......@@ -407,7 +406,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
* hotplug bits itself. So only WARN about unexpected
* interrupts on saner platforms.
*/
-WARN_ONCE(!HAS_GMCH_DISPLAY(dev),
+WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
"Received HPD interrupt on pin %d although disabled\n", i);
continue;
}
......@@ -427,7 +426,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
}
if (storm_detected)
-dev_priv->display.hpd_irq_setup(dev);
+dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);
/*
......@@ -485,7 +484,7 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
*/
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
-dev_priv->display.hpd_irq_setup(dev);
+dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
......
......@@ -574,10 +574,8 @@ static void asle_work(struct work_struct *work)
asle->aslc = aslc_stat;
}
-void intel_opregion_asle_intr(struct drm_device *dev)
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
-struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->opregion.asle)
schedule_work(&dev_priv->opregion.asle_work);
}
......
......@@ -4167,9 +4167,8 @@ DEFINE_SPINLOCK(mchdev_lock);
* mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;
-bool ironlake_set_drps(struct drm_device *dev, u8 val)
+bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
-struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
assert_spin_locked(&mchdev_lock);
......@@ -4191,9 +4190,8 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
return true;
}
-static void ironlake_enable_drps(struct drm_device *dev)
+static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
{
-struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl;
u8 fmax, fmin, fstart, vstart;
......@@ -4250,7 +4248,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
DRM_ERROR("stuck trying to change perf mode\n");
mdelay(1);
-ironlake_set_drps(dev, fstart);
+ironlake_set_drps(dev_priv, fstart);
dev_priv->ips.last_count1 = I915_READ(DMIEC) +
I915_READ(DDREC) + I915_READ(CSIEC);
......@@ -4261,9 +4259,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
spin_unlock_irq(&mchdev_lock);
}
-static void ironlake_disable_drps(struct drm_device *dev)
+static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
{
-struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
spin_lock_irq(&mchdev_lock);
......@@ -4278,7 +4275,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
/* Go back to the starting frequency */
-ironlake_set_drps(dev, dev_priv->ips.fstart);
+ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE(MEMSWCTL, rgvswctl);
......@@ -6095,7 +6092,7 @@ bool i915_gpu_turbo_disable(void)
dev_priv->ips.max_delay = dev_priv->ips.fstart;
-if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
+if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
ret = false;
out_unlock:
......@@ -6246,13 +6243,11 @@ void intel_cleanup_gt_powersave(struct drm_device *dev)
intel_runtime_pm_put(dev_priv);
}
-static void gen6_suspend_rps(struct drm_device *dev)
+static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
{
-struct drm_i915_private *dev_priv = dev->dev_private;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-gen6_disable_rps_interrupts(dev);
+gen6_disable_rps_interrupts(dev_priv);
}
/**
......@@ -6267,10 +6262,10 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-if (INTEL_INFO(dev)->gen < 6)
+if (INTEL_GEN(dev_priv) < 6)
return;
-gen6_suspend_rps(dev);
+gen6_suspend_rps(dev_priv);
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
......@@ -6281,7 +6276,7 @@ void intel_disable_gt_powersave(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_IRONLAKE_M(dev)) {
-ironlake_disable_drps(dev);
+ironlake_disable_drps(dev_priv);
} else if (INTEL_INFO(dev)->gen >= 6) {
intel_suspend_gt_powersave(dev);
......@@ -6337,7 +6332,7 @@ static void intel_gen6_powersave_work(struct work_struct *work)
dev_priv->rps.enabled = true;
-gen6_enable_rps_interrupts(dev);
+gen6_enable_rps_interrupts(dev_priv);
mutex_unlock(&dev_priv->rps.hw_lock);
......@@ -6353,7 +6348,7 @@ void intel_enable_gt_powersave(struct drm_device *dev)
return;
if (IS_IRONLAKE_M(dev)) {
-ironlake_enable_drps(dev);
+ironlake_enable_drps(dev_priv);
mutex_lock(&dev->struct_mutex);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
......@@ -6383,7 +6378,7 @@ void intel_reset_gt_powersave(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6)
return;
-gen6_suspend_rps(dev);
+gen6_suspend_rps(dev_priv);
dev_priv->rps.enabled = false;
}
......@@ -7412,12 +7407,11 @@ static void __intel_rps_boost_work(struct work_struct *work)
kfree(boost);
}
-void intel_queue_rps_boost_for_request(struct drm_device *dev,
-struct drm_i915_gem_request *req)
+void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
struct request_boost *boost;
-if (req == NULL || INTEL_INFO(dev)->gen < 6)
+if (req == NULL || INTEL_GEN(req->i915) < 6)
return;
if (i915_gem_request_completed(req, true))
......@@ -7431,7 +7425,7 @@ void intel_queue_rps_boost_for_request(struct drm_device *dev,
boost->req = req;
INIT_WORK(&boost->work, __intel_rps_boost_work);
-queue_work(to_i915(dev)->wq, &boost->work);
+queue_work(req->i915->wq, &boost->work);
}
void intel_pm_setup(struct drm_device *dev)
......