Commit 8af30675 authored by Jeremy Kerr, committed by Paul Mackerras

[POWERPC] spufs: use #defines for SPU class [012] exception status

Add a few #defines for the class 0, 1 and 2 interrupt status bits, and
use them instead of magic numbers when we're setting or checking for
these interrupts.

Also, add a #define for the class 2 mailbox threshold interrupt mask.
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Parent c40aa471
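
The change itself is mechanical: every open-coded bit test against the class 0, 1 or 2 interrupt status registers is replaced by one of the new constants from spu.h, so the intent no longer lives only in a trailing comment. A minimal sketch of the pattern, for illustration only (handle_dma_align() and handle_spu_error() are hypothetical stand-ins for the real trap helpers; only the CLASS0_* names come from this commit):

        /* before: magic numbers, meaning carried by the comment */
        if (stat & 1)           /* invalid DMA alignment */
                handle_dma_align(spu);
        if (stat & 4)           /* error on SPU */
                handle_spu_error(spu);

        /* after: the bit name documents itself */
        if (stat & CLASS0_DMA_ALIGNMENT_INTR)
                handle_dma_align(spu);
        if (stat & CLASS0_SPU_ERROR_INTR)
                handle_spu_error(spu);
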
@@ -360,18 +360,18 @@ spu_irq_class_0_bottom(struct spu *spu)
         stat = spu->class_0_pending;
         spu->class_0_pending = 0;
-        if (stat & 1) /* invalid DMA alignment */
+        if (stat & CLASS0_DMA_ALIGNMENT_INTR)
                 __spu_trap_dma_align(spu);
-        if (stat & 2) /* invalid MFC DMA */
+        if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
                 __spu_trap_invalid_dma(spu);
-        if (stat & 4) /* error on SPU */
+        if (stat & CLASS0_SPU_ERROR_INTR)
                 __spu_trap_error(spu);
         spin_unlock_irqrestore(&spu->register_lock, flags);
-        return (stat & 0x7) ? -EIO : 0;
+        return (stat & CLASS0_INTR_MASK) ? -EIO : 0;
 }
 EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
@@ -389,24 +389,23 @@ spu_irq_class_1(int irq, void *data)
         stat = spu_int_stat_get(spu, 1) & mask;
         dar = spu_mfc_dar_get(spu);
         dsisr = spu_mfc_dsisr_get(spu);
-        if (stat & 2) /* mapping fault */
+        if (stat & CLASS1_STORAGE_FAULT_INTR)
                 spu_mfc_dsisr_set(spu, 0ul);
         spu_int_stat_clear(spu, 1, stat);
         spin_unlock(&spu->register_lock);
         pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
                         dar, dsisr);
-        if (stat & 1) /* segment fault */
+        if (stat & CLASS1_SEGMENT_FAULT_INTR)
                 __spu_trap_data_seg(spu, dar);
-        if (stat & 2) { /* mapping fault */
+        if (stat & CLASS1_STORAGE_FAULT_INTR)
                 __spu_trap_data_map(spu, dar, dsisr);
-        }
-        if (stat & 4) /* ls compare & suspend on get */
+        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
                 ;
-        if (stat & 8) /* ls compare & suspend on put */
+        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
                 ;
         return stat ? IRQ_HANDLED : IRQ_NONE;
@@ -418,6 +417,8 @@ spu_irq_class_2(int irq, void *data)
         struct spu *spu;
         unsigned long stat;
         unsigned long mask;
+        const int mailbox_intrs =
+                CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;
         spu = data;
         spin_lock(&spu->register_lock);
@@ -425,31 +426,30 @@ spu_irq_class_2(int irq, void *data)
         mask = spu_int_mask_get(spu, 2);
         /* ignore interrupts we're not waiting for */
         stat &= mask;
-        /*
-         * mailbox interrupts (0x1 and 0x10) are level triggered.
-         * mask them now before acknowledging.
-         */
-        if (stat & 0x11)
-                spu_int_mask_and(spu, 2, ~(stat & 0x11));
+        /* mailbox interrupts are level triggered. mask them now before
+         * acknowledging */
+        if (stat & mailbox_intrs)
+                spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
         /* acknowledge all interrupts before the callbacks */
         spu_int_stat_clear(spu, 2, stat);
         spin_unlock(&spu->register_lock);
         pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
-        if (stat & 1) /* PPC core mailbox */
+        if (stat & CLASS2_MAILBOX_INTR)
                 spu->ibox_callback(spu);
-        if (stat & 2) /* SPU stop-and-signal */
+        if (stat & CLASS2_SPU_STOP_INTR)
                 spu->stop_callback(spu);
-        if (stat & 4) /* SPU halted */
+        if (stat & CLASS2_SPU_HALT_INTR)
                 spu->stop_callback(spu);
-        if (stat & 8) /* DMA tag group complete */
+        if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
                 spu->mfc_callback(spu);
-        if (stat & 0x10) /* SPU mailbox threshold */
+        if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
                 spu->wbox_callback(spu);
         spu->stats.class2_intr++;
...
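
As the comment in spu_irq_class_2() notes, the two mailbox sources are level triggered: the line stays asserted for as long as the mailbox condition holds, so acknowledging the status bit without first masking the source would simply re-raise the interrupt. A condensed, illustrative restatement of the flow above (names as in the diff; locking and the callback dispatch omitted):

        const int mailbox_intrs =
                CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

        /* mask the level-triggered mailbox sources first ... */
        if (stat & mailbox_intrs)
                spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));

        /* ... then acknowledge everything and run the callbacks; the
         * spufs mailbox code below re-enables these bits with
         * spu_int_mask_or() when it next waits for a mailbox event */
        spu_int_stat_clear(spu, 2, stat);
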
@@ -106,16 +106,20 @@ static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
                 if (stat & 0xff0000)
                         ret |= POLLIN | POLLRDNORM;
                 else {
-                        ctx->csa.priv1.int_stat_class2_RW &= ~0x1;
-                        ctx->csa.priv1.int_mask_class2_RW |= 0x1;
+                        ctx->csa.priv1.int_stat_class2_RW &=
+                                ~CLASS2_MAILBOX_INTR;
+                        ctx->csa.priv1.int_mask_class2_RW |=
+                                CLASS2_ENABLE_MAILBOX_INTR;
                 }
         }
         if (events & (POLLOUT | POLLWRNORM)) {
                 if (stat & 0x00ff00)
                         ret = POLLOUT | POLLWRNORM;
                 else {
-                        ctx->csa.priv1.int_stat_class2_RW &= ~0x10;
-                        ctx->csa.priv1.int_mask_class2_RW |= 0x10;
+                        ctx->csa.priv1.int_stat_class2_RW &=
+                                ~CLASS2_MAILBOX_THRESHOLD_INTR;
+                        ctx->csa.priv1.int_mask_class2_RW |=
+                                CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
                 }
         }
         spin_unlock_irq(&ctx->csa.register_lock);
@@ -139,7 +143,7 @@ static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
                 ret = 4;
         } else {
                 /* make sure we get woken up by the interrupt */
-                ctx->csa.priv1.int_mask_class2_RW |= 0x1UL;
+                ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
                 ret = 0;
         }
         spin_unlock(&ctx->csa.register_lock);
@@ -169,7 +173,8 @@ static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
         } else {
                 /* make sure we get woken up by the interrupt when space
                    becomes available */
-                ctx->csa.priv1.int_mask_class2_RW |= 0x10;
+                ctx->csa.priv1.int_mask_class2_RW |=
+                        CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
                 ret = 0;
         }
         spin_unlock(&ctx->csa.register_lock);
...
@@ -76,16 +76,18 @@ static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
                 if (stat & 0xff0000)
                         ret |= POLLIN | POLLRDNORM;
                 else {
-                        spu_int_stat_clear(spu, 2, 0x1);
-                        spu_int_mask_or(spu, 2, 0x1);
+                        spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR);
+                        spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
                 }
         }
         if (events & (POLLOUT | POLLWRNORM)) {
                 if (stat & 0x00ff00)
                         ret = POLLOUT | POLLWRNORM;
                 else {
-                        spu_int_stat_clear(spu, 2, 0x10);
-                        spu_int_mask_or(spu, 2, 0x10);
+                        spu_int_stat_clear(spu, 2,
+                                        CLASS2_MAILBOX_THRESHOLD_INTR);
+                        spu_int_mask_or(spu, 2,
+                                        CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
                 }
         }
         spin_unlock_irq(&spu->register_lock);
@@ -106,7 +108,7 @@ static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
                 ret = 4;
         } else {
                 /* make sure we get woken up by the interrupt */
-                spu_int_mask_or(spu, 2, 0x1);
+                spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
                 ret = 0;
         }
         spin_unlock_irq(&spu->register_lock);
@@ -127,7 +129,7 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
         } else {
                 /* make sure we get woken up by the interrupt when space
                    becomes available */
-                spu_int_mask_or(spu, 2, 0x10);
+                spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
                 ret = 0;
         }
         spin_unlock_irq(&spu->register_lock);
...
@@ -527,8 +527,22 @@ struct spu_priv1 {
 #define CLASS2_ENABLE_SPU_STOP_INTR                        0x2L
 #define CLASS2_ENABLE_SPU_HALT_INTR                        0x4L
 #define CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR      0x8L
+#define CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR               0x10L
         u8  pad_0x118_0x140[0x28];                              /* 0x118 */
         u64 int_stat_RW[3];                                     /* 0x140 */
+#define CLASS0_DMA_ALIGNMENT_INTR                          0x1L
+#define CLASS0_INVALID_DMA_COMMAND_INTR                    0x2L
+#define CLASS0_SPU_ERROR_INTR                              0x4L
+#define CLASS0_INTR_MASK                                   0x7L
+#define CLASS1_SEGMENT_FAULT_INTR                          0x1L
+#define CLASS1_STORAGE_FAULT_INTR                          0x2L
+#define CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR              0x4L
+#define CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR              0x8L
+#define CLASS2_MAILBOX_INTR                                0x1L
+#define CLASS2_SPU_STOP_INTR                               0x2L
+#define CLASS2_SPU_HALT_INTR                               0x4L
+#define CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR             0x8L
+#define CLASS2_MAILBOX_THRESHOLD_INTR                      0x10L
         u8  pad_0x158_0x180[0x28];                              /* 0x158 */
         u64 int_route_RW;                                       /* 0x180 */
...
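
One property worth noting: the new status-register constants use the same bit positions as the existing CLASS2_ENABLE_* mask-register constants (0x2L for SPU stop, 0x10L for the mailbox threshold, and so on), which is what lets the spufs code above pair a CLASS2_*_INTR status clear with the matching CLASS2_ENABLE_*_INTR mask update. A hypothetical compile-time check, shown only to make that invariant explicit (not part of the commit):

        /* illustrative only: assumes BUILD_BUG_ON from <linux/kernel.h> */
        static inline void spu_class2_bit_layout_check(void)
        {
                BUILD_BUG_ON(CLASS2_ENABLE_SPU_STOP_INTR != CLASS2_SPU_STOP_INTR);
                BUILD_BUG_ON(CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR !=
                             CLASS2_MAILBOX_THRESHOLD_INTR);
        }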