author		Jeremy Kerr <jk@ozlabs.org>	2007-12-20 16:39:59 +0900
committer	Paul Mackerras <paulus@samba.org>	2007-12-21 19:46:20 +1100
commit		8af30675c3e7b945bbaf6f57b724f246e56eb209 (patch)
tree		a883fbefe8d2a4dc8c8ef4855e1159c94bcf7c64 /arch
parent		c40aa4710479b5d9f0e1fdf71b151f4c3708e3eb (diff)
[POWERPC] spufs: use #defines for SPU class [012] exception status
Add a few #defines for the class 0, 1 and 2 interrupt status bits, and use
them instead of magic numbers when we're setting or checking for these
interrupts. Also, add a #define for the class 2 mailbox threshold interrupt
mask.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/platforms/cell/spu_base.c			42
-rw-r--r--	arch/powerpc/platforms/cell/spufs/backing_ops.c		17
-rw-r--r--	arch/powerpc/platforms/cell/spufs/hw_ops.c		14
3 files changed, 40 insertions(+), 33 deletions(-)
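The CLASS[012]_*_INTR constants themselves are defined outside this diffstat,
which is limited to 'arch' (presumably in include/asm-powerpc/spu.h, alongside
the other SPU register definitions). As a reading aid, here is a sketch of
what that header addition would look like, with names taken from the diff
below and values inferred from the magic numbers they replace; treat the
exact values and the header location as assumptions, not a quote of the patch:

	/* sketch: SPU class 0, 1 and 2 interrupt status bits, values
	 * inferred from the magic numbers replaced in the hunks below */
	#define CLASS0_DMA_ALIGNMENT_INTR		0x1
	#define CLASS0_INVALID_DMA_COMMAND_INTR		0x2
	#define CLASS0_SPU_ERROR_INTR			0x4
	#define CLASS0_INTR_MASK			0x7

	#define CLASS1_SEGMENT_FAULT_INTR		0x1
	#define CLASS1_STORAGE_FAULT_INTR		0x2
	#define CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR	0x4
	#define CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR	0x8

	#define CLASS2_MAILBOX_INTR			0x1
	#define CLASS2_SPU_STOP_INTR			0x2
	#define CLASS2_SPU_HALT_INTR			0x4
	#define CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR	0x8
	#define CLASS2_MAILBOX_THRESHOLD_INTR		0x10

	/* the class 2 enable (mask) register appears to use the same bit
	 * layout as the status register, so the ENABLE names carry the
	 * same values */
	#define CLASS2_ENABLE_MAILBOX_INTR		0x1
	#define CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR	0x10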
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index f73263ba984..a560277b3ad 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -360,18 +360,18 @@ spu_irq_class_0_bottom(struct spu *spu)
stat = spu->class_0_pending;
spu->class_0_pending = 0;
- if (stat & 1) /* invalid DMA alignment */
+ if (stat & CLASS0_DMA_ALIGNMENT_INTR)
__spu_trap_dma_align(spu);
- if (stat & 2) /* invalid MFC DMA */
+ if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
__spu_trap_invalid_dma(spu);
- if (stat & 4) /* error on SPU */
+ if (stat & CLASS0_SPU_ERROR_INTR)
__spu_trap_error(spu);
spin_unlock_irqrestore(&spu->register_lock, flags);
- return (stat & 0x7) ? -EIO : 0;
+ return (stat & CLASS0_INTR_MASK) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
@@ -389,24 +389,23 @@ spu_irq_class_1(int irq, void *data)
stat = spu_int_stat_get(spu, 1) & mask;
dar = spu_mfc_dar_get(spu);
dsisr = spu_mfc_dsisr_get(spu);
- if (stat & 2) /* mapping fault */
+ if (stat & CLASS1_STORAGE_FAULT_INTR)
spu_mfc_dsisr_set(spu, 0ul);
spu_int_stat_clear(spu, 1, stat);
spin_unlock(&spu->register_lock);
pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
dar, dsisr);
- if (stat & 1) /* segment fault */
+ if (stat & CLASS1_SEGMENT_FAULT_INTR)
__spu_trap_data_seg(spu, dar);
- if (stat & 2) { /* mapping fault */
+ if (stat & CLASS1_STORAGE_FAULT_INTR)
__spu_trap_data_map(spu, dar, dsisr);
- }
- if (stat & 4) /* ls compare & suspend on get */
+ if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
;
- if (stat & 8) /* ls compare & suspend on put */
+ if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
;
return stat ? IRQ_HANDLED : IRQ_NONE;
@@ -418,6 +417,8 @@ spu_irq_class_2(int irq, void *data)
struct spu *spu;
unsigned long stat;
unsigned long mask;
+ const int mailbox_intrs =
+ CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;
spu = data;
spin_lock(&spu->register_lock);
@@ -425,31 +426,30 @@ spu_irq_class_2(int irq, void *data)
mask = spu_int_mask_get(spu, 2);
/* ignore interrupts we're not waiting for */
stat &= mask;
- /*
- * mailbox interrupts (0x1 and 0x10) are level triggered.
- * mask them now before acknowledging.
- */
- if (stat & 0x11)
- spu_int_mask_and(spu, 2, ~(stat & 0x11));
+
+ /* mailbox interrupts are level triggered. mask them now before
+ * acknowledging */
+ if (stat & mailbox_intrs)
+ spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
/* acknowledge all interrupts before the callbacks */
spu_int_stat_clear(spu, 2, stat);
spin_unlock(&spu->register_lock);
pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
- if (stat & 1) /* PPC core mailbox */
+ if (stat & CLASS2_MAILBOX_INTR)
spu->ibox_callback(spu);
- if (stat & 2) /* SPU stop-and-signal */
+ if (stat & CLASS2_SPU_STOP_INTR)
spu->stop_callback(spu);
- if (stat & 4) /* SPU halted */
+ if (stat & CLASS2_SPU_HALT_INTR)
spu->stop_callback(spu);
- if (stat & 8) /* DMA tag group complete */
+ if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
spu->mfc_callback(spu);
- if (stat & 0x10) /* SPU mailbox threshold */
+ if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
spu->wbox_callback(spu);
spu->stats.class2_intr++;
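Note on the mask-before-acknowledge ordering above: the mailbox interrupts
are level triggered, so a source that is still asserted would fire again
immediately after the status write if it were not masked first. Distilled
from the hunk above (same accessors and the mailbox_intrs constant from the
diff; locking and the callback dispatch elided), the pattern is:

	stat = spu_int_stat_get(spu, 2) & spu_int_mask_get(spu, 2);
	/* mask the level-triggered mailbox sources before the ack ... */
	if (stat & mailbox_intrs)
		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
	/* ... so that clearing the status bits actually silences them */
	spu_int_stat_clear(spu, 2, stat);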
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c
index 97b2d5e587c..d4495531e5b 100644
--- a/arch/powerpc/platforms/cell/spufs/backing_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c
@@ -106,16 +106,20 @@ static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
if (stat & 0xff0000)
ret |= POLLIN | POLLRDNORM;
else {
- ctx->csa.priv1.int_stat_class2_RW &= ~0x1;
- ctx->csa.priv1.int_mask_class2_RW |= 0x1;
+ ctx->csa.priv1.int_stat_class2_RW &=
+ ~CLASS2_MAILBOX_INTR;
+ ctx->csa.priv1.int_mask_class2_RW |=
+ CLASS2_ENABLE_MAILBOX_INTR;
}
}
if (events & (POLLOUT | POLLWRNORM)) {
if (stat & 0x00ff00)
ret |= POLLOUT | POLLWRNORM;
else {
- ctx->csa.priv1.int_stat_class2_RW &= ~0x10;
- ctx->csa.priv1.int_mask_class2_RW |= 0x10;
+ ctx->csa.priv1.int_stat_class2_RW &=
+ ~CLASS2_MAILBOX_THRESHOLD_INTR;
+ ctx->csa.priv1.int_mask_class2_RW |=
+ CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
}
}
spin_unlock_irq(&ctx->csa.register_lock);
@@ -139,7 +143,7 @@ static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data)
ret = 4;
} else {
/* make sure we get woken up by the interrupt */
- ctx->csa.priv1.int_mask_class2_RW |= 0x1UL;
+ ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
ret = 0;
}
spin_unlock(&ctx->csa.register_lock);
@@ -169,7 +173,8 @@ static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
} else {
/* make sure we get woken up by the interrupt when space
becomes available */
- ctx->csa.priv1.int_mask_class2_RW |= 0x10;
+ ctx->csa.priv1.int_mask_class2_RW |=
+ CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
ret = 0;
}
spin_unlock(&ctx->csa.register_lock);
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c
index a7767e3b085..64f8540b832 100644
--- a/arch/powerpc/platforms/cell/spufs/hw_ops.c
+++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c
@@ -76,16 +76,18 @@ static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
if (stat & 0xff0000)
ret |= POLLIN | POLLRDNORM;
else {
- spu_int_stat_clear(spu, 2, 0x1);
- spu_int_mask_or(spu, 2, 0x1);
+ spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR);
+ spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
}
}
if (events & (POLLOUT | POLLWRNORM)) {
if (stat & 0x00ff00)
ret |= POLLOUT | POLLWRNORM;
else {
- spu_int_stat_clear(spu, 2, 0x10);
- spu_int_mask_or(spu, 2, 0x10);
+ spu_int_stat_clear(spu, 2,
+ CLASS2_MAILBOX_THRESHOLD_INTR);
+ spu_int_mask_or(spu, 2,
+ CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
}
}
spin_unlock_irq(&spu->register_lock);
@@ -106,7 +108,7 @@ static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data)
ret = 4;
} else {
/* make sure we get woken up by the interrupt */
- spu_int_mask_or(spu, 2, 0x1);
+ spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
ret = 0;
}
spin_unlock_irq(&spu->register_lock);
@@ -127,7 +129,7 @@ static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
} else {
/* make sure we get woken up by the interrupt when space
becomes available */
- spu_int_mask_or(spu, 2, 0x10);
+ spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
ret = 0;
}
spin_unlock_irq(&spu->register_lock);
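The poll paths above still test the raw mailbox status word with magic
numbers (0xff0000 and 0x00ff00), which this patch deliberately leaves alone;
the surrounding spufs code treats these as the interrupting-mailbox count
and the outbound-mailbox free-space fields. A hypothetical follow-up in the
same spirit could name those fields too, for example (names invented here
for illustration, not part of the patch):

	/* hypothetical: fields of the SPU mailbox status register,
	 * mirroring the 0xff0000 / 0x00ff00 tests above */
	#define MBOX_STAT_IBOX_COUNT	0xff0000	/* data pending for ibox reads */
	#define MBOX_STAT_WBOX_SPACE	0x00ff00	/* room left for wbox writes */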