Commit 45f9a39b authored by Alex Deucher, committed by Dave Airlie

drm/radeon/kms/evergreen: implement irq support

Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent fe251e2f
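
Before the diff itself, here is a small, self-contained sketch of the interrupt-handler (IH) ring parsing pattern that the new evergreen_irq_process() below follows: the hardware writes 16-byte vectors into a ring buffer, the driver reads the source id and source data dwords of each vector, dispatches on them, and advances a byte read pointer that wraps with a power-of-two mask. Everything in the sketch (struct fake_ih, the ring size, main()) is an illustrative placeholder, not the driver's actual types or register accessors.

/*
 * Minimal sketch of the IH ring parsing loop used by the r600/evergreen
 * interrupt handler. Not kernel code; all names here are placeholders.
 */
#include <stdint.h>
#include <stdio.h>

#define IH_RING_BYTES 64                 /* tiny ring for the example; real rings are KBs */
#define IH_PTR_MASK   (IH_RING_BYTES - 1)

struct fake_ih {
    uint32_t ring[IH_RING_BYTES / 4];    /* ring contents, dword-addressed */
    uint32_t rptr;                       /* read pointer, in bytes */
};

static void process_ih(struct fake_ih *ih, uint32_t wptr)
{
    while (ih->rptr != wptr) {
        uint32_t ring_index = ih->rptr / 4;              /* rptr/wptr are in bytes */
        uint32_t src_id   = ih->ring[ring_index] & 0xff;
        uint32_t src_data = ih->ring[ring_index + 1] & 0xfffffff;

        switch (src_id) {
        case 1:   /* D1 vblank/vline on evergreen */
            printf("crtc 0 event, data %u\n", (unsigned)src_data);
            break;
        case 42:  /* HPD hotplug */
            printf("hotplug, pin %u\n", (unsigned)src_data);
            break;
        default:
            printf("unhandled: %u %u\n", (unsigned)src_id, (unsigned)src_data);
            break;
        }
        ih->rptr = (ih->rptr + 16) & IH_PTR_MASK;        /* one 16-byte vector consumed */
    }
}

int main(void)
{
    struct fake_ih ih = { .rptr = 0 };
    ih.ring[0] = 1;  ih.ring[1] = 0;     /* pretend: D1 vblank */
    ih.ring[4] = 42; ih.ring[5] = 3;     /* pretend: HPD4 */
    process_ih(&ih, 32);                 /* hardware write pointer after two vectors */
    return 0;
}
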
@@ -249,16 +249,12 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
-/* XXX re-enable when interrupt support is added */
-if (!ASIC_IS_DCE4(rdev))
-drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
-/* XXX re-enable when interrupt support is added */
-if (!ASIC_IS_DCE4(rdev))
-drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
...
@@ -1205,12 +1205,532 @@ int evergreen_asic_reset(struct radeon_device *rdev)
return evergreen_gpu_soft_reset(rdev);
}
/* Interrupts */
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
switch (crtc) {
case 0:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
case 1:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
case 2:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
case 3:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
case 4:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
case 5:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
default:
return 0;
}
}
void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
WREG32(CP_INT_CNTL, 0);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD1_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD2_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD3_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD4_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD5_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
int evergreen_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
return -EINVAL;
}
/* don't enable anything if the ih is disabled */
if (!rdev->ih.enabled) {
r600_disable_interrupts(rdev);
/* force the active interrupt state to all disabled */
evergreen_disable_interrupt_state(rdev);
return 0;
}
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
if (rdev->irq.sw_int) {
DRM_DEBUG("evergreen_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
}
if (rdev->irq.crtc_vblank_int[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
crtc1 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[1]) {
DRM_DEBUG("evergreen_irq_set: vblank 1\n");
crtc2 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[2]) {
DRM_DEBUG("evergreen_irq_set: vblank 2\n");
crtc3 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[3]) {
DRM_DEBUG("evergreen_irq_set: vblank 3\n");
crtc4 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[4]) {
DRM_DEBUG("evergreen_irq_set: vblank 4\n");
crtc5 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[5]) {
DRM_DEBUG("evergreen_irq_set: vblank 5\n");
crtc6 |= VBLANK_INT_MASK;
}
if (rdev->irq.hpd[0]) {
DRM_DEBUG("evergreen_irq_set: hpd 1\n");
hpd1 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[1]) {
DRM_DEBUG("evergreen_irq_set: hpd 2\n");
hpd2 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[2]) {
DRM_DEBUG("evergreen_irq_set: hpd 3\n");
hpd3 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[3]) {
DRM_DEBUG("evergreen_irq_set: hpd 4\n");
hpd4 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[4]) {
DRM_DEBUG("evergreen_irq_set: hpd 5\n");
hpd5 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[5]) {
DRM_DEBUG("evergreen_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
WREG32(DC_HPD4_INT_CONTROL, hpd4);
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
return 0;
}
static inline void evergreen_irq_ack(struct radeon_device *rdev,
u32 *disp_int,
u32 *disp_int_cont,
u32 *disp_int_cont2,
u32 *disp_int_cont3,
u32 *disp_int_cont4,
u32 *disp_int_cont5)
{
u32 tmp;
*disp_int = RREG32(DISP_INTERRUPT_STATUS);
*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
*disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
*disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
*disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
*disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
if (*disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
if (*disp_int & LB_D1_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
if (*disp_int & DC_HPD1_INTERRUPT) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD1_INT_CONTROL, tmp);
}
if (*disp_int_cont & DC_HPD2_INTERRUPT) {
tmp = RREG32(DC_HPD2_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD2_INT_CONTROL, tmp);
}
if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
tmp = RREG32(DC_HPD3_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD3_INT_CONTROL, tmp);
}
if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
tmp = RREG32(DC_HPD4_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD4_INT_CONTROL, tmp);
}
if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
}
void evergreen_irq_disable(struct radeon_device *rdev)
{
u32 disp_int, disp_int_cont, disp_int_cont2;
u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
r600_disable_interrupts(rdev);
/* Wait and acknowledge irq */
mdelay(1);
evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
&disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
evergreen_disable_interrupt_state(rdev);
}
static void evergreen_irq_suspend(struct radeon_device *rdev)
{
evergreen_irq_disable(rdev);
r600_rlc_stop(rdev);
}
static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
/* XXX use writeback */
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
/* When a ring buffer overflow happens, start parsing interrupts
* from the last not-overwritten vector (wptr + 16). Hopefully
* this should allow us to catch up.
*/
dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
}
return (wptr & rdev->ih.ptr_mask);
}
int evergreen_irq_process(struct radeon_device *rdev)
{
u32 wptr = evergreen_get_ih_wptr(rdev);
u32 rptr = rdev->ih.rptr;
u32 src_id, src_data;
u32 ring_index;
u32 disp_int, disp_int_cont, disp_int_cont2;
u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
unsigned long flags;
bool queue_hotplug = false;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
if (!rdev->ih.enabled)
return IRQ_NONE;
spin_lock_irqsave(&rdev->ih.lock, flags);
if (rptr == wptr) {
spin_unlock_irqrestore(&rdev->ih.lock, flags);
return IRQ_NONE;
}
if (rdev->shutdown) {
spin_unlock_irqrestore(&rdev->ih.lock, flags);
return IRQ_NONE;
}
restart_ih:
/* display interrupts */
evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
&disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
src_id = rdev->ih.ring[ring_index] & 0xff;
src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
switch (src_id) {
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
if (disp_int & LB_D1_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 0);
wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
break;
case 1: /* D1 vline */
if (disp_int & LB_D1_VLINE_INTERRUPT) {
disp_int &= ~LB_D1_VLINE_INTERRUPT;
DRM_DEBUG("IH: D1 vline\n");
}
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 1);
wake_up(&rdev->irq.vblank_queue);
disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
break;
case 1: /* D2 vline */
if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
DRM_DEBUG("IH: D2 vline\n");
}
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 2);
wake_up(&rdev->irq.vblank_queue);
disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
}
break;
case 1: /* D3 vline */
if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
DRM_DEBUG("IH: D3 vline\n");
}
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 3);
wake_up(&rdev->irq.vblank_queue);
disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
}
break;
case 1: /* D4 vline */
if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
DRM_DEBUG("IH: D4 vline\n");
}
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 4);
wake_up(&rdev->irq.vblank_queue);
disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
}
break;
case 1: /* D5 vline */
if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
DRM_DEBUG("IH: D5 vline\n");
}
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 5);
wake_up(&rdev->irq.vblank_queue);
disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
}
break;
case 1: /* D6 vline */
if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
DRM_DEBUG("IH: D6 vline\n");
}
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
if (disp_int & DC_HPD1_INTERRUPT) {
disp_int &= ~DC_HPD1_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD1\n");
}
break;
case 1:
if (disp_int_cont & DC_HPD2_INTERRUPT) {
disp_int_cont &= ~DC_HPD2_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD2\n");
}
break;
case 2:
if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD3\n");
}
break;
case 3:
if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD4\n");
}
break;
case 4:
if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");
}
break;
case 5:
if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");
}
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
radeon_fence_process(rdev);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
}
/* make sure wptr hasn't changed while processing */
wptr = evergreen_get_ih_wptr(rdev);
if (wptr != rdev->ih.wptr)
goto restart_ih;
if (queue_hotplug)
queue_work(rdev->wq, &rdev->hotplug_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
return IRQ_HANDLED;
}
static int evergreen_startup(struct radeon_device *rdev)
{
int r;
-/* XXX until interrupts are supported */
-if (!rdev->me_fw || !rdev->pfp_fw /*|| !rdev->rlc_fw*/) {
+if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
@@ -1246,6 +1766,7 @@ static int evergreen_startup(struct radeon_device *rdev)
DRM_ERROR("failed to pin blit object %d\n", r);
return r;
}
+#endif
/* Enable IRQ */
r = r600_irq_init(rdev);
@@ -1254,8 +1775,7 @@ static int evergreen_startup(struct radeon_device *rdev)
radeon_irq_kms_fini(rdev);
return r;
}
-r600_irq_set(rdev);
-#endif
+evergreen_irq_set(rdev);
r = radeon_ring_init(rdev, rdev->cp.ring_size);
if (r)
@@ -1312,8 +1832,8 @@ int evergreen_suspend(struct radeon_device *rdev)
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
+evergreen_irq_suspend(rdev);
r600_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
#if 0
/* unpin shaders bo */
@@ -1415,17 +1935,17 @@ int evergreen_init(struct radeon_device *rdev)
r = radeon_bo_init(rdev);
if (r)
return r;
-#if 0
r = radeon_irq_kms_init(rdev);
if (r)
return r;
-#endif
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
-#if 0
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
-#endif
r = r600_pcie_gart_init(rdev);
if (r)
return r;
@@ -1436,10 +1956,8 @@
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_wb_fini(rdev);
-#if 0
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
-#endif
evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
@@ -1461,14 +1979,11 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
radeon_pm_fini(rdev);
-evergreen_suspend(rdev);
-#if 0
-r600_blit_fini(rdev);
+/*r600_blit_fini(rdev);*/
+r700_cp_fini(rdev);
+r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
-radeon_ring_fini(rdev);
-r600_wb_fini(rdev);
-#endif
evergreen_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
...
@@ -405,4 +405,152 @@
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
#define IH_RB_BASE 0x3e04
#define IH_RB_RPTR 0x3e08
#define IH_RB_WPTR 0x3e0c
# define RB_OVERFLOW (1 << 0)
# define WPTR_OFFSET_MASK 0x3fffc
#define IH_RB_WPTR_ADDR_HI 0x3e10
#define IH_RB_WPTR_ADDR_LO 0x3e14
#define IH_CNTL 0x3e18
# define ENABLE_INTR (1 << 0)
# define IH_MC_SWAP(x) ((x) << 2)
# define IH_MC_SWAP_NONE 0
# define IH_MC_SWAP_16BIT 1
# define IH_MC_SWAP_32BIT 2
# define IH_MC_SWAP_64BIT 3
# define RPTR_REARM (1 << 4)
# define MC_WRREQ_CREDIT(x) ((x) << 15)
# define MC_WR_CLEAN_CNT(x) ((x) << 20)
#define CP_INT_CNTL 0xc124
# define CNTX_BUSY_INT_ENABLE (1 << 19)
# define CNTX_EMPTY_INT_ENABLE (1 << 20)
# define SCRATCH_INT_ENABLE (1 << 25)
# define TIME_STAMP_INT_ENABLE (1 << 26)
# define IB2_INT_ENABLE (1 << 29)
# define IB1_INT_ENABLE (1 << 30)
# define RB_INT_ENABLE (1 << 31)
#define CP_INT_STATUS 0xc128
# define SCRATCH_INT_STAT (1 << 25)
# define TIME_STAMP_INT_STAT (1 << 26)
# define IB2_INT_STAT (1 << 29)
# define IB1_INT_STAT (1 << 30)
# define RB_INT_STAT (1 << 31)
#define GRBM_INT_CNTL 0x8060
# define RDERR_INT_ENABLE (1 << 0)
# define GUI_IDLE_INT_ENABLE (1 << 19)
/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
#define CRTC_STATUS_FRAME_COUNT 0x6e98
/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
#define VLINE_STATUS 0x6bb8
# define VLINE_OCCURRED (1 << 0)
# define VLINE_ACK (1 << 4)
# define VLINE_STAT (1 << 12)
# define VLINE_INTERRUPT (1 << 16)
# define VLINE_INTERRUPT_TYPE (1 << 17)
/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
#define VBLANK_STATUS 0x6bbc
# define VBLANK_OCCURRED (1 << 0)
# define VBLANK_ACK (1 << 4)
# define VBLANK_STAT (1 << 12)
# define VBLANK_INTERRUPT (1 << 16)
# define VBLANK_INTERRUPT_TYPE (1 << 17)
/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
#define INT_MASK 0x6b40
# define VBLANK_INT_MASK (1 << 0)
# define VLINE_INT_MASK (1 << 4)
#define DISP_INTERRUPT_STATUS 0x60f4
# define LB_D1_VLINE_INTERRUPT (1 << 2)
# define LB_D1_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD1_INTERRUPT (1 << 17)
# define DC_HPD1_RX_INTERRUPT (1 << 18)
# define DACA_AUTODETECT_INTERRUPT (1 << 22)
# define DACB_AUTODETECT_INTERRUPT (1 << 23)
# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
# define LB_D2_VLINE_INTERRUPT (1 << 2)
# define LB_D2_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD2_INTERRUPT (1 << 17)
# define DC_HPD2_RX_INTERRUPT (1 << 18)
# define DISP_TIMER_INTERRUPT (1 << 24)
#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
# define LB_D3_VLINE_INTERRUPT (1 << 2)
# define LB_D3_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD3_INTERRUPT (1 << 17)
# define DC_HPD3_RX_INTERRUPT (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
# define LB_D4_VLINE_INTERRUPT (1 << 2)
# define LB_D4_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD4_INTERRUPT (1 << 17)
# define DC_HPD4_RX_INTERRUPT (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
# define LB_D5_VLINE_INTERRUPT (1 << 2)
# define LB_D5_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD5_INTERRUPT (1 << 17)
# define DC_HPD5_RX_INTERRUPT (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
# define LB_D6_VLINE_INTERRUPT (1 << 2)
# define LB_D6_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD6_INTERRUPT (1 << 17)
# define DC_HPD6_RX_INTERRUPT (1 << 18)
/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
#define GRPH_INT_STATUS 0x6858
# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
# define GRPH_PFLIP_INT_CLEAR (1 << 8)
/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
#define GRPH_INT_CONTROL 0x685c
# define GRPH_PFLIP_INT_MASK (1 << 0)
# define GRPH_PFLIP_INT_TYPE (1 << 8)
#define DACA_AUTODETECT_INT_CONTROL 0x66c8
#define DACB_AUTODETECT_INT_CONTROL 0x67c8
#define DC_HPD1_INT_STATUS 0x601c
#define DC_HPD2_INT_STATUS 0x6028
#define DC_HPD3_INT_STATUS 0x6034
#define DC_HPD4_INT_STATUS 0x6040
#define DC_HPD5_INT_STATUS 0x604c
#define DC_HPD6_INT_STATUS 0x6058
# define DC_HPDx_INT_STATUS (1 << 0)
# define DC_HPDx_SENSE (1 << 1)
# define DC_HPDx_RX_INT_STATUS (1 << 8)
#define DC_HPD1_INT_CONTROL 0x6020
#define DC_HPD2_INT_CONTROL 0x602c
#define DC_HPD3_INT_CONTROL 0x6038
#define DC_HPD4_INT_CONTROL 0x6044
#define DC_HPD5_INT_CONTROL 0x6050
#define DC_HPD6_INT_CONTROL 0x605c
# define DC_HPDx_INT_ACK (1 << 0)
# define DC_HPDx_INT_POLARITY (1 << 8)
# define DC_HPDx_INT_EN (1 << 16)
# define DC_HPDx_RX_INT_ACK (1 << 20)
# define DC_HPDx_RX_INT_EN (1 << 24)
#define DC_HPD1_CONTROL 0x6024
#define DC_HPD2_CONTROL 0x6030
#define DC_HPD3_CONTROL 0x603c
#define DC_HPD4_CONTROL 0x6048
#define DC_HPD5_CONTROL 0x6054
#define DC_HPD6_CONTROL 0x6060
# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
#endif
@@ -45,6 +45,7 @@
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -71,12 +72,16 @@ MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
+MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
+MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
+MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
+MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
@@ -84,6 +89,7 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
+void r600_irq_disable(struct radeon_device *rdev);
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
@@ -1461,20 +1467,20 @@ int r600_init_microcode(struct radeon_device *rdev)
break;
case CHIP_CEDAR:
chip_name = "CEDAR";
-rlc_chip_name = "";
+rlc_chip_name = "CEDAR";
break;
case CHIP_REDWOOD:
chip_name = "REDWOOD";
-rlc_chip_name = "";
+rlc_chip_name = "REDWOOD";
break;
case CHIP_JUNIPER:
chip_name = "JUNIPER";
-rlc_chip_name = "";
+rlc_chip_name = "JUNIPER";
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
chip_name = "CYPRESS";
-rlc_chip_name = "";
+rlc_chip_name = "CYPRESS";
break;
default: BUG();
}
@@ -1482,7 +1488,7 @@ int r600_init_microcode(struct radeon_device *rdev)
if (rdev->family >= CHIP_CEDAR) {
pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
-rlc_req_size = 0;
+rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
} else if (rdev->family >= CHIP_RV770) {
pfp_req_size = R700_PFP_UCODE_SIZE * 4;
me_req_size = R700_PM4_UCODE_SIZE * 4;
@@ -1518,8 +1524,6 @@ int r600_init_microcode(struct radeon_device *rdev)
err = -EINVAL;
}
-/* XXX until evergreen interrupts are supported */
-if (rdev->family < CHIP_CEDAR) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
if (err)
@@ -1530,7 +1534,6 @@
rdev->rlc_fw->size, fw_name);
err = -EINVAL;
}
-}
out:
platform_device_unregister(pdev);
@@ -2309,10 +2312,11 @@ static void r600_ih_ring_fini(struct radeon_device *rdev)
}
}
-static void r600_rlc_stop(struct radeon_device *rdev)
+void r600_rlc_stop(struct radeon_device *rdev)
{
-if (rdev->family >= CHIP_RV770) {
+if ((rdev->family >= CHIP_RV770) &&
+    (rdev->family <= CHIP_RV740)) {
/* r7xx asics need to soft reset RLC before halting */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
RREG32(SRBM_SOFT_RESET);
@@ -2349,7 +2353,12 @@ static int r600_rlc_init(struct radeon_device *rdev)
WREG32(RLC_UCODE_CNTL, 0);
fw_data = (const __be32 *)rdev->rlc_fw->data;
-if (rdev->family >= CHIP_RV770) {
+if (rdev->family >= CHIP_CEDAR) {
+for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
+WREG32(RLC_UCODE_ADDR, i);
+WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+}
+} else if (rdev->family >= CHIP_RV770) {
for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
@@ -2379,7 +2388,7 @@ static void r600_enable_interrupts(struct radeon_device *rdev)
rdev->ih.enabled = true;
}
-static void r600_disable_interrupts(struct radeon_device *rdev)
+void r600_disable_interrupts(struct radeon_device *rdev)
{
u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
u32 ih_cntl = RREG32(IH_CNTL);
@@ -2494,6 +2503,9 @@ int r600_irq_init(struct radeon_device *rdev)
WREG32(IH_CNTL, ih_cntl);
/* force the active interrupt state to all disabled */
+if (rdev->family >= CHIP_CEDAR)
+evergreen_disable_interrupt_state(rdev);
+else
r600_disable_interrupt_state(rdev);
/* enable irqs */
@@ -2504,7 +2516,7 @@
void r600_irq_suspend(struct radeon_device *rdev)
{
-r600_disable_interrupts(rdev);
+r600_irq_disable(rdev);
r600_rlc_stop(rdev);
}
...
@@ -372,7 +372,7 @@ struct radeon_irq {
bool installed;
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
-bool crtc_vblank_int[2];
+bool crtc_vblank_int[6];
wait_queue_head_t vblank_queue;
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
@@ -1324,6 +1324,8 @@ extern void r600_irq_fini(struct radeon_device *rdev);
extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_irq_set(struct radeon_device *rdev);
extern void r600_irq_suspend(struct radeon_device *rdev);
+extern void r600_disable_interrupts(struct radeon_device *rdev);
+extern void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
extern int r600_audio_init(struct radeon_device *rdev);
extern int r600_audio_tmds_index(struct drm_encoder *encoder);
@@ -1343,6 +1345,7 @@ extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
extern void r700_cp_stop(struct radeon_device *rdev);
extern void r700_cp_fini(struct radeon_device *rdev);
+void evergreen_disable_interrupt_state(struct radeon_device *rdev);
/* evergreen */
struct evergreen_mc_save {
...
@@ -643,9 +643,9 @@ static struct radeon_asic evergreen_asic = {
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
.ring_ib_execute = &r600_ring_ib_execute,
-.irq_set = NULL,
-.irq_process = NULL,
-.get_vblank_counter = NULL,
+.irq_set = &evergreen_irq_set,
+.irq_process = &evergreen_irq_process,
+.get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = NULL,
.cs_parse = NULL,
.copy_blit = NULL,
...
@@ -294,4 +294,8 @@ void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
+u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
+int evergreen_irq_set(struct radeon_device *rdev);
+int evergreen_irq_process(struct radeon_device *rdev);
#endif