Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/fsldma.c    30
-rw-r--r--  drivers/dma/fsldma.h    48
-rw-r--r--  drivers/dma/ioat_dca.c   4
3 files changed, 60 insertions, 22 deletions
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index ad2f938597e..72692309398 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -123,6 +123,11 @@ static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}
+static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
+{
+ return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
+}
+
static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
u32 sr = get_sr(fsl_chan);
@@ -426,6 +431,9 @@ fsl_dma_prep_interrupt(struct dma_chan *chan)
new->async_tx.cookie = -EBUSY;
new->async_tx.ack = 0;
+ /* Insert the link descriptor into the LD ring */
+ list_add_tail(&new->node, &new->async_tx.tx_list);
+
/* Set End-of-link on the last link descriptor of the new list */
set_ld_eol(fsl_chan, new);
@@ -701,6 +709,23 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
if (stat & FSL_DMA_SR_TE)
dev_err(fsl_chan->dev, "Transfer Error!\n");
+ /* Programming Error
+ * The DMA_INTERRUPT async_tx is a NULL transfer, which will
+ * trigger a PE interrupt.
+ */
+ if (stat & FSL_DMA_SR_PE) {
+ dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
+ if (get_bcr(fsl_chan) == 0) {
+ /* The BCR register is 0, so this is a DMA_INTERRUPT async_tx.
+ * Update the completed cookie and continue with the
+ * next uncompleted transfer.
+ */
+ fsl_dma_update_completed_cookie(fsl_chan);
+ fsl_chan_xfer_ld_queue(fsl_chan);
+ }
+ stat &= ~FSL_DMA_SR_PE;
+ }
+
/* If the link descriptor segment transfer finishes,
* we will recycle the used descriptor.
*/
@@ -841,6 +866,11 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
async_tx_ack(tx3);
+ /* Interrupt tx test */
+ tx1 = fsl_dma_prep_interrupt(chan);
+ async_tx_ack(tx1);
+ cookie = fsl_dma_tx_submit(tx1);
+
/* Test submitting the prepared tx descriptors out of order */
cookie = fsl_dma_tx_submit(tx3);
cookie = fsl_dma_tx_submit(tx2);
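
The self test above exercises the new interrupt descriptor end to end. A condensed sketch of that round trip, using only calls that appear in this patch (error handling omitted, chan assumed in scope):

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = fsl_dma_prep_interrupt(chan);	/* NULL transfer; BCR stays 0 */
	async_tx_ack(tx);			/* no further chaining by the client */
	cookie = fsl_dma_tx_submit(tx);		/* queue it on the LD ring */
	/* When the PE interrupt fires and get_bcr() reads 0, the handler
	 * updates the completed cookie and starts the next queued transfer. */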
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index ba78c42121b..6faf07ba0d0 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -40,6 +40,7 @@
#define FSL_DMA_MR_EOTIE 0x00000080
#define FSL_DMA_SR_CH 0x00000020
+#define FSL_DMA_SR_PE 0x00000010
#define FSL_DMA_SR_CB 0x00000004
#define FSL_DMA_SR_TE 0x00000080
#define FSL_DMA_SR_EOSI 0x00000002
@@ -74,12 +75,15 @@
#define FSL_DMA_DGSR_EOSI 0x02
#define FSL_DMA_DGSR_EOLSI 0x01
+typedef u64 __bitwise v64;
+typedef u32 __bitwise v32;
+
struct fsl_dma_ld_hw {
- u64 __bitwise src_addr;
- u64 __bitwise dst_addr;
- u64 __bitwise next_ln_addr;
- u32 __bitwise count;
- u32 __bitwise reserve;
+ v64 src_addr;
+ v64 dst_addr;
+ v64 next_ln_addr;
+ v32 count;
+ v32 reserve;
} __attribute__((aligned(32)));
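
The v64/v32 typedefs make these descriptor fields opaque to sparse: a __bitwise value cannot be mixed with plain integers without a __force cast, so a missing endian conversion becomes a static warning. A minimal sketch of the technique in kernel context (demo_t and its helpers are hypothetical, for illustration only):

	typedef u32 __bitwise demo_t;

	static inline demo_t cpu_to_demo(u32 val)
	{
		return (__force demo_t)cpu_to_le32(val);	/* conversion is explicit */
	}

	static inline u32 demo_to_cpu(demo_t val)
	{
		return le32_to_cpu((__force __le32)val);
	}

	/* demo_t d = 5;  <-- sparse warns: plain integer assigned to a
	 *                    __bitwise type without a __force cast */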
struct fsl_desc_sw {
@@ -91,13 +95,13 @@ struct fsl_desc_sw {
} __attribute__((aligned(32)));
struct fsl_dma_chan_regs {
- u32 __bitwise mr; /* 0x00 - Mode Register */
- u32 __bitwise sr; /* 0x04 - Status Register */
- u64 __bitwise cdar; /* 0x08 - Current descriptor address register */
- u64 __bitwise sar; /* 0x10 - Source Address Register */
- u64 __bitwise dar; /* 0x18 - Destination Address Register */
- u32 __bitwise bcr; /* 0x20 - Byte Count Register */
- u64 __bitwise ndar; /* 0x24 - Next Descriptor Address Register */
+ u32 mr; /* 0x00 - Mode Register */
+ u32 sr; /* 0x04 - Status Register */
+ u64 cdar; /* 0x08 - Current descriptor address register */
+ u64 sar; /* 0x10 - Source Address Register */
+ u64 dar; /* 0x18 - Destination Address Register */
+ u32 bcr; /* 0x20 - Byte Count Register */
+ u64 ndar; /* 0x24 - Next Descriptor Address Register */
};
struct fsl_dma_chan;
@@ -150,25 +154,27 @@ struct fsl_dma_chan {
#ifndef __powerpc64__
static u64 in_be64(const u64 __iomem *addr)
{
- return ((u64)in_be32((u32 *)addr) << 32) | (in_be32((u32 *)addr + 1));
+ return ((u64)in_be32((u32 __iomem *)addr) << 32) |
+ (in_be32((u32 __iomem *)addr + 1));
}
static void out_be64(u64 __iomem *addr, u64 val)
{
- out_be32((u32 *)addr, val >> 32);
- out_be32((u32 *)addr + 1, (u32)val);
+ out_be32((u32 __iomem *)addr, val >> 32);
+ out_be32((u32 __iomem *)addr + 1, (u32)val);
}
/* There are no asm instructions for 64-bit byte-reversed loads and stores */
static u64 in_le64(const u64 __iomem *addr)
{
- return ((u64)in_le32((u32 *)addr + 1) << 32) | (in_le32((u32 *)addr));
+ return ((u64)in_le32((u32 __iomem *)addr + 1) << 32) |
+ (in_le32((u32 __iomem *)addr));
}
static void out_le64(u64 __iomem *addr, u64 val)
{
- out_le32((u32 *)addr + 1, val >> 32);
- out_le32((u32 *)addr, (u32)val);
+ out_le32((u32 __iomem *)addr + 1, val >> 32);
+ out_le32((u32 __iomem *)addr, (u32)val);
}
#endif
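
The casts above now preserve the __iomem annotation; sparse tracks I/O memory as a separate address space, and a plain (u32 *) cast would launder the pointer and silence the checker for real bugs. A small sketch of the rule (read_lo32 is a hypothetical helper, assuming the big-endian word order used by in_be64 above):

	/* Split 64-bit MMIO read: keep __iomem through the cast */
	static u32 read_lo32(const u64 __iomem *addr)
	{
		return in_be32((const u32 __iomem *)addr + 1);	/* low word */
	}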
@@ -181,9 +187,11 @@ static void out_le64(u64 __iomem *addr, u64 val)
#define DMA_TO_CPU(fsl_chan, d, width) \
(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
- be##width##_to_cpu(d) : le##width##_to_cpu(d))
+ be##width##_to_cpu((__force __be##width)(v##width)d) : \
+ le##width##_to_cpu((__force __le##width)(v##width)d))
#define CPU_TO_DMA(fsl_chan, c, width) \
(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
- cpu_to_be##width(c) : cpu_to_le##width(c))
+ (__force v##width)cpu_to_be##width(c) : \
+ (__force v##width)cpu_to_le##width(c))
#endif /* __DMA_FSLDMA_H */
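
With the __force casts confined to these two macros, driver code moves values in and out of descriptor fields with no visible casts. A hypothetical usage sketch (desc, fsl_chan, and len assumed in scope):

	/* store a CPU value into a descriptor field in DMA byte order */
	desc->hw.count = CPU_TO_DMA(fsl_chan, len, 32);
	/* read it back in CPU byte order */
	len = DMA_TO_CPU(fsl_chan, desc->hw.count, 32);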
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c
index 0fa8a98051a..9e922760b7f 100644
--- a/drivers/dma/ioat_dca.c
+++ b/drivers/dma/ioat_dca.c
@@ -98,7 +98,7 @@ struct ioat_dca_slot {
struct ioat_dca_priv {
void __iomem *iobase;
- void *dca_base;
+ void __iomem *dca_base;
int max_requesters;
int requester_count;
u8 tag_map[IOAT_TAG_MAP_LEN];
@@ -338,7 +338,7 @@ static struct dca_ops ioat2_dca_ops = {
.get_tag = ioat2_dca_get_tag,
};
-static int ioat2_dca_count_dca_slots(void *iobase, u16 dca_offset)
+static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
int slots = 0;
u32 req;
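
The ioat_dca changes are sparse hygiene of the same kind: dca_base and the iobase parameter are MMIO pointers, so they must carry __iomem and be accessed through accessors. A hypothetical illustration of the pattern the annotation enforces (the offset name is made up):

	void __iomem *slot = priv->iobase + EXAMPLE_DCA_OFFSET;	/* hypothetical offset */
	u32 val = readl(slot);	/* ok: readl takes an __iomem pointer */
	/* u32 bad = *(u32 *)slot;  <-- sparse: cast removes the
	 *                              __iomem address space */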