Commit 73a6d3fc authored by Rafał Miłecki, committed by Dave Airlie

drm/radeon/kms: use wait queue (events) for VBLANK sync

This already simplifies the code significantly and makes it maintainable
in case memory reclocking plus voltage changing is added in the future.
Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent 20d6c346
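For context: the patch replaces the old scheme (the interrupt handler queued a reclock work item when a callback flag was set) with the kernel's plain wait-queue API — the VBLANK interrupt handlers call wake_up() on a wait_queue_head_t, and the reclocking path sleeps on that queue with wait_event_interruptible_timeout(). Below is a minimal, self-contained sketch of that wait-queue pattern; the example_* names are hypothetical and are not part of this patch, and the flag-based condition is only illustrative (the patch itself waits on a constant condition bounded by a 200 ms timeout).

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/jiffies.h>

#define EXAMPLE_VBLANK_TIMEOUT_MS 200   /* mirrors RADEON_WAIT_VBLANK_TIMEOUT */

/* One wait queue plus a flag; no locking shown, this is only a sketch. */
static DECLARE_WAIT_QUEUE_HEAD(example_vblank_queue);
static bool example_vblank_seen;

/* Called from the display interrupt handler when a VBLANK fires. */
static void example_vblank_irq(void)
{
    example_vblank_seen = true;
    wake_up(&example_vblank_queue);     /* wake any sleeper below */
}

/* Called from process context before reprogramming the clocks. */
static void example_wait_for_vblank(void)
{
    example_vblank_seen = false;
    /* Sleep until the IRQ handler signals a VBLANK, or give up after 200 ms. */
    wait_event_interruptible_timeout(example_vblank_queue,
                                     example_vblank_seen,
                                     msecs_to_jiffies(EXAMPLE_VBLANK_TIMEOUT_MS));
}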
@@ -312,13 +312,11 @@ int r100_irq_process(struct radeon_device *rdev)
         /* Vertical blank interrupts */
         if (status & RADEON_CRTC_VBLANK_STAT) {
             drm_handle_vblank(rdev->ddev, 0);
-            if (rdev->pm.vblank_callback)
-                queue_work(rdev->wq, &rdev->pm.reclock_work);
+            wake_up(&rdev->irq.vblank_queue);
         }
         if (status & RADEON_CRTC2_VBLANK_STAT) {
             drm_handle_vblank(rdev->ddev, 1);
-            if (rdev->pm.vblank_callback)
-                queue_work(rdev->wq, &rdev->pm.reclock_work);
+            wake_up(&rdev->irq.vblank_queue);
         }
         if (status & RADEON_FP_DETECT_STAT) {
             queue_hotplug = true;
...
@@ -2744,8 +2744,7 @@ int r600_irq_process(struct radeon_device *rdev)
             case 0: /* D1 vblank */
                 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
                     drm_handle_vblank(rdev->ddev, 0);
-                    if (rdev->pm.vblank_callback)
-                        queue_work(rdev->wq, &rdev->pm.reclock_work);
+                    wake_up(&rdev->irq.vblank_queue);
                     disp_int &= ~LB_D1_VBLANK_INTERRUPT;
                     DRM_DEBUG("IH: D1 vblank\n");
                 }
@@ -2766,8 +2765,7 @@ int r600_irq_process(struct radeon_device *rdev)
             case 0: /* D2 vblank */
                 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
                     drm_handle_vblank(rdev->ddev, 1);
-                    if (rdev->pm.vblank_callback)
-                        queue_work(rdev->wq, &rdev->pm.reclock_work);
+                    wake_up(&rdev->irq.vblank_queue);
                     disp_int &= ~LB_D2_VBLANK_INTERRUPT;
                     DRM_DEBUG("IH: D2 vblank\n");
                 }
...
@@ -351,6 +351,7 @@ struct radeon_irq {
     bool                sw_int;
     /* FIXME: use a define max crtc rather than hardcode it */
     bool                crtc_vblank_int[2];
+    wait_queue_head_t   vblank_queue;
     /* FIXME: use defines for max hpd/dacs */
     bool                hpd[6];
     spinlock_t          sw_lock;
@@ -657,13 +658,11 @@ struct radeon_power_state {
 struct radeon_pm {
     struct mutex            mutex;
-    struct work_struct      reclock_work;
     struct delayed_work     idle_work;
     enum radeon_pm_state    state;
     enum radeon_pm_action   planned_action;
     unsigned long           action_timeout;
     bool                    downclocked;
-    bool                    vblank_callback;
     int                     active_crtcs;
     int                     req_vblank;
     fixed20_12              max_bandwidth;
...
@@ -645,6 +645,7 @@ int radeon_device_init(struct radeon_device *rdev,
     mutex_init(&rdev->pm.mutex);
     rwlock_init(&rdev->fence_drv.lock);
     INIT_LIST_HEAD(&rdev->gem.objects);
+    init_waitqueue_head(&rdev->irq.vblank_queue);

     /* setup workqueue */
     rdev->wq = create_workqueue("radeon");
...
@@ -25,10 +25,10 @@
 #define RADEON_IDLE_LOOP_MS 100
 #define RADEON_RECLOCK_DELAY_MS 200
+#define RADEON_WAIT_VBLANK_TIMEOUT 200

 static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
 static void radeon_pm_set_clocks(struct radeon_device *rdev);
-static void radeon_pm_reclock_work_handler(struct work_struct *work);
 static void radeon_pm_idle_work_handler(struct work_struct *work);
 static int radeon_debugfs_pm_init(struct radeon_device *rdev);
@@ -214,7 +214,6 @@ int radeon_pm_init(struct radeon_device *rdev)
     rdev->pm.state = PM_STATE_DISABLED;
     rdev->pm.planned_action = PM_ACTION_NONE;
     rdev->pm.downclocked = false;
-    rdev->pm.vblank_callback = false;

     if (rdev->bios) {
         if (rdev->is_atom_bios)
@@ -228,7 +227,6 @@ int radeon_pm_init(struct radeon_device *rdev)
         DRM_ERROR("Failed to register debugfs file for PM!\n");
     }

-    INIT_WORK(&rdev->pm.reclock_work, radeon_pm_reclock_work_handler);
     INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);

     if (radeon_dynpm != -1 && radeon_dynpm) {
@@ -266,26 +264,14 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
     if (count > 1) {
         if (rdev->pm.state == PM_STATE_ACTIVE) {
-            wait_queue_head_t wait;
-            init_waitqueue_head(&wait);
-
             cancel_delayed_work(&rdev->pm.idle_work);

             rdev->pm.state = PM_STATE_PAUSED;
             rdev->pm.planned_action = PM_ACTION_UPCLOCK;
-            radeon_get_power_state(rdev, rdev->pm.planned_action);
-            rdev->pm.vblank_callback = true;
-
-            mutex_unlock(&rdev->pm.mutex);
-
-            wait_event_timeout(wait, !rdev->pm.downclocked,
-                msecs_to_jiffies(300));
-            if (!rdev->pm.downclocked)
+            if (rdev->pm.downclocked)
                 radeon_pm_set_clocks(rdev);

             DRM_DEBUG("radeon: dynamic power management deactivated\n");
-        } else {
-            mutex_unlock(&rdev->pm.mutex);
         }
     } else if (count == 1) {
         /* TODO: Increase clocks if needed for current mode */
@@ -293,8 +279,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
         if (rdev->pm.state == PM_STATE_MINIMUM) {
             rdev->pm.state = PM_STATE_ACTIVE;
             rdev->pm.planned_action = PM_ACTION_UPCLOCK;
-            radeon_get_power_state(rdev, rdev->pm.planned_action);
-            radeon_pm_set_clocks_locked(rdev);
+            radeon_pm_set_clocks(rdev);

             queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
                 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
@@ -305,8 +290,6 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
                 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
             DRM_DEBUG("radeon: dynamic power management activated\n");
         }
-
-        mutex_unlock(&rdev->pm.mutex);
     }
     else { /* count == 0 */
         if (rdev->pm.state != PM_STATE_MINIMUM) {
@@ -314,12 +297,11 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
             rdev->pm.state = PM_STATE_MINIMUM;
             rdev->pm.planned_action = PM_ACTION_MINIMUM;
-            radeon_get_power_state(rdev, rdev->pm.planned_action);
-            radeon_pm_set_clocks_locked(rdev);
+            radeon_pm_set_clocks(rdev);
         }
-        mutex_unlock(&rdev->pm.mutex);
     }
+    mutex_unlock(&rdev->pm.mutex);
 }
@@ -344,31 +326,32 @@ static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
 static void radeon_pm_set_clocks(struct radeon_device *rdev)
 {
-    mutex_lock(&rdev->pm.mutex);
-    /* new VBLANK irq may come before handling previous one */
-    if (rdev->pm.vblank_callback) {
-        mutex_lock(&rdev->cp.mutex);
-        if (rdev->pm.req_vblank & (1 << 0)) {
-            rdev->pm.req_vblank &= ~(1 << 0);
-            drm_vblank_put(rdev->ddev, 0);
-        }
-        if (rdev->pm.req_vblank & (1 << 1)) {
-            rdev->pm.req_vblank &= ~(1 << 1);
-            drm_vblank_put(rdev->ddev, 1);
-        }
-        rdev->pm.vblank_callback = false;
-        radeon_pm_set_clocks_locked(rdev);
-        mutex_unlock(&rdev->cp.mutex);
+    radeon_get_power_state(rdev, rdev->pm.planned_action);
+    mutex_lock(&rdev->cp.mutex);
+
+    if (rdev->pm.active_crtcs & (1 << 0)) {
+        rdev->pm.req_vblank |= (1 << 0);
+        drm_vblank_get(rdev->ddev, 0);
+    }
+    if (rdev->pm.active_crtcs & (1 << 1)) {
+        rdev->pm.req_vblank |= (1 << 1);
+        drm_vblank_get(rdev->ddev, 1);
+    }
+    if (rdev->pm.active_crtcs)
+        wait_event_interruptible_timeout(
+            rdev->irq.vblank_queue, 0,
+            msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
+    if (rdev->pm.req_vblank & (1 << 0)) {
+        rdev->pm.req_vblank &= ~(1 << 0);
+        drm_vblank_put(rdev->ddev, 0);
+    }
+    if (rdev->pm.req_vblank & (1 << 1)) {
+        rdev->pm.req_vblank &= ~(1 << 1);
+        drm_vblank_put(rdev->ddev, 1);
     }
-    mutex_unlock(&rdev->pm.mutex);
-}

-static void radeon_pm_reclock_work_handler(struct work_struct *work)
-{
-    struct radeon_device *rdev;
-    rdev = container_of(work, struct radeon_device,
-                pm.reclock_work);
-    radeon_pm_set_clocks(rdev);
+    radeon_pm_set_clocks_locked(rdev);
+    mutex_unlock(&rdev->cp.mutex);
 }
@@ -378,8 +361,7 @@ static void radeon_pm_idle_work_handler(struct work_struct *work)
                 pm.idle_work.work);

     mutex_lock(&rdev->pm.mutex);
-    if (rdev->pm.state == PM_STATE_ACTIVE &&
-        !rdev->pm.vblank_callback) {
+    if (rdev->pm.state == PM_STATE_ACTIVE) {
         unsigned long irq_flags;
         int not_processed = 0;
@@ -417,17 +399,8 @@ static void radeon_pm_idle_work_handler(struct work_struct *work)
         }

         if (rdev->pm.planned_action != PM_ACTION_NONE &&
             jiffies > rdev->pm.action_timeout) {
-            if (rdev->pm.active_crtcs & (1 << 0)) {
-                rdev->pm.req_vblank |= (1 << 0);
-                drm_vblank_get(rdev->ddev, 0);
-            }
-            if (rdev->pm.active_crtcs & (1 << 1)) {
-                rdev->pm.req_vblank |= (1 << 1);
-                drm_vblank_get(rdev->ddev, 1);
-            }
-            radeon_get_power_state(rdev, rdev->pm.planned_action);
-            rdev->pm.vblank_callback = true;
+            radeon_pm_set_clocks(rdev);
         }
     }
     mutex_unlock(&rdev->pm.mutex);
...
@@ -408,13 +408,11 @@ int rs600_irq_process(struct radeon_device *rdev)
         /* Vertical blank interrupts */
         if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
             drm_handle_vblank(rdev->ddev, 0);
-            if (rdev->pm.vblank_callback)
-                queue_work(rdev->wq, &rdev->pm.reclock_work);
+            wake_up(&rdev->irq.vblank_queue);
         }
         if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
             drm_handle_vblank(rdev->ddev, 1);
-            if (rdev->pm.vblank_callback)
-                queue_work(rdev->wq, &rdev->pm.reclock_work);
+            wake_up(&rdev->irq.vblank_queue);
         }
         if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
             queue_hotplug = true;
...