Commit f8e72db3 authored by Linus Torvalds

Merge tag 'dmaengine-fix-4.12-rc4' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "Here is the dmaengine fixes request for 4.12. Fixes bunch of issues in
  the driver, npthing exciting though..

   - mv_xor_v2 driver fixes for handling descriptors, tx_submit
     implementation, removing interrupt coalescing and setting DMA mask
     properly

   - fix usb-dmac DMAOR AE bit definition

   - fix ep93xx to always start from buffer BASE0 and to not drain the
     transfers in terminate_all

   - fix rcar-dmac to use right descriptor pointer for residue
     calculation

   - pl330 fix warning on irq freeing"

* tag 'dmaengine-fix-4.12-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: pl330: fix warning in pl330_remove
  rcar-dmac: fixup descriptor pointer for descriptor mode
  dmaengine: ep93xx: Don't drain the transfers in terminate_all()
  dmaengine: ep93xx: Always start from BASE0
  dmaengine: usb-dmac: Fix DMAOR AE bit definition
  dmaengine: mv_xor_v2: set DMA mask to 40 bits
  dmaengine: mv_xor_v2: remove interrupt coalescing
  dmaengine: mv_xor_v2: fix tx_submit() implementation
  dmaengine: mv_xor_v2: enable XOR engine after its configuration
  dmaengine: mv_xor_v2: do not use descriptors not acked by async_tx
  dmaengine: mv_xor_v2: properly handle wrapping in the array of HW descriptors
  dmaengine: mv_xor_v2: handle mv_xor_v2_prep_sw_desc() error properly
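
The ep93xx part of this pull wires up the dmaengine device_synchronize hook, so the consumer-facing pattern is worth recalling. Below is a minimal consumer-side sketch, not code from this merge: it assumes a slave channel already obtained via dma_request_chan(), and the helper stop_rx_dma() is hypothetical; dmaengine_terminate_async() and dmaengine_synchronize() are the standard wrappers from include/linux/dmaengine.h.

#include <linux/dmaengine.h>

/* Hypothetical consumer helper: stop DMA so that buffers still
 * referenced by completion callbacks can be freed safely afterwards. */
static void stop_rx_dma(struct dma_chan *chan)
{
	/* Ask the driver to stop; safe in atomic context. */
	dmaengine_terminate_async(chan);

	/* May sleep: returns only once in-flight transfers and their
	 * completion callbacks have finished.  This call lands in the
	 * driver's device_synchronize hook (m2p_hw_synchronize in the
	 * ep93xx diff below). */
	dmaengine_synchronize(chan);
}

When the call site may sleep anyway, dmaengine_terminate_sync() performs both steps in one call.
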
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
@@ -201,6 +201,7 @@ struct ep93xx_dma_engine {
struct dma_device dma_dev;
bool m2m;
int (*hw_setup)(struct ep93xx_dma_chan *);
+void (*hw_synchronize)(struct ep93xx_dma_chan *);
void (*hw_shutdown)(struct ep93xx_dma_chan *);
void (*hw_submit)(struct ep93xx_dma_chan *);
int (*hw_interrupt)(struct ep93xx_dma_chan *);
@@ -323,6 +324,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
| M2P_CONTROL_ENABLE;
m2p_set_control(edmac, control);
+edmac->buffer = 0;
return 0;
}
@@ -331,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}
-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{
+unsigned long flags;
u32 control;
+spin_lock_irqsave(&edmac->lock, flags);
control = readl(edmac->regs + M2P_CONTROL);
control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
m2p_set_control(edmac, control);
+spin_unlock_irqrestore(&edmac->lock, flags);
while (m2p_channel_state(edmac) >= M2P_STATE_ON)
-cpu_relax();
+schedule();
+}
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
m2p_set_control(edmac, 0);
-while (m2p_channel_state(edmac) == M2P_STATE_STALL)
-cpu_relax();
+while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
+dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
}
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
@@ -1160,6 +1169,26 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
return NULL;
}
+/**
+ * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ *
+ * Synchronizes the DMA channel termination to the current context. When this
+ * function returns it is guaranteed that all transfers for previously issued
+ * descriptors have stopped and it is safe to free the memory associated
+ * with them. Furthermore it is guaranteed that all complete callback functions
+ * for a previously submitted descriptor have finished running and it is safe
+ * to free resources accessed from within the complete callbacks.
+ */
+static void ep93xx_dma_synchronize(struct dma_chan *chan)
+{
+struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+if (edmac->edma->hw_synchronize)
+edmac->edma->hw_synchronize(edmac);
+}
/**
* ep93xx_dma_terminate_all - terminate all transactions
* @chan: channel
@@ -1323,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
dma_dev->device_config = ep93xx_dma_slave_config;
+dma_dev->device_synchronize = ep93xx_dma_synchronize;
dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
dma_dev->device_tx_status = ep93xx_dma_tx_status;
@@ -1340,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
} else {
dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+edma->hw_synchronize = m2p_hw_synchronize;
edma->hw_setup = m2p_hw_setup;
edma->hw_shutdown = m2p_hw_shutdown;
edma->hw_submit = m2p_hw_submit;
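
The split above follows the usual dmaengine rule: device_terminate_all() may be called from atomic context and therefore only stops the hardware, while the blocking wait for idle moves to device_synchronize(), which runs in process context and can schedule() instead of spinning with cpu_relax(). A reduced sketch of the same shape, with hypothetical foo_* names rather than the actual ep93xx code:

#include <linux/io.h>
#include <linux/sched.h>

struct foo_chan {
	void __iomem *status;	/* hypothetical channel status register */
};

static bool foo_hw_busy(struct foo_chan *fc)
{
	return readl(fc->status) & 0x1;	/* assumed busy bit */
}

/* device_synchronize body: process context, so sleeping is allowed. */
static void foo_synchronize(struct foo_chan *fc)
{
	while (foo_hw_busy(fc))
		schedule();	/* yield rather than busy-wait */
}
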
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
@@ -161,6 +161,7 @@ struct mv_xor_v2_device {
struct mv_xor_v2_sw_desc *sw_desq;
int desc_size;
unsigned int npendings;
+unsigned int hw_queue_idx;
};
/**
@@ -213,18 +214,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
}
}
-/*
- * Return the next available index in the DESQ.
- */
-static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
-{
-/* read the index for the next available descriptor in the DESQ */
-u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
-return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
-& MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
-}
/*
* notify the engine of new descriptors, and update the available index.
*/
@@ -257,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
return MV_XOR_V2_EXT_DESC_SIZE;
}
-/*
- * Set the IMSG threshold
- */
-static inline
-void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
-{
-u32 reg;
-reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
-writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
-}
static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
{
struct mv_xor_v2_device *xor_dev = data;
@@ -288,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
if (!ndescs)
return IRQ_NONE;
-/*
- * Update IMSG threshold, to disable new IMSG interrupts until
- * end of the tasklet
- */
-mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
/* schedule a tasklet to handle descriptors callbacks */
tasklet_schedule(&xor_dev->irq_tasklet);
@@ -306,7 +273,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
static dma_cookie_t
mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
{
-int desq_ptr;
void *dest_hw_desc;
dma_cookie_t cookie;
struct mv_xor_v2_sw_desc *sw_desc =
@@ -322,15 +288,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_bh(&xor_dev->lock);
cookie = dma_cookie_assign(tx);
-/* get the next available slot in the DESQ */
-desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
/* copy the HW descriptor from the SW descriptor to the DESQ */
-dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
xor_dev->npendings++;
+xor_dev->hw_queue_idx++;
+if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
+xor_dev->hw_queue_idx = 0;
spin_unlock_bh(&xor_dev->lock);
@@ -344,6 +310,7 @@ static struct mv_xor_v2_sw_desc *
mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
{
struct mv_xor_v2_sw_desc *sw_desc;
+bool found = false;
/* Lock the channel */
spin_lock_bh(&xor_dev->lock);
@@ -355,19 +322,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
return NULL;
}
-/* get a free SW descriptor from the SW DESQ */
-sw_desc = list_first_entry(&xor_dev->free_sw_desc,
-struct mv_xor_v2_sw_desc, free_list);
+list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
+if (async_tx_test_ack(&sw_desc->async_tx)) {
+found = true;
+break;
+}
+}
+if (!found) {
+spin_unlock_bh(&xor_dev->lock);
+return NULL;
+}
list_del(&sw_desc->free_list);
/* Release the channel */
spin_unlock_bh(&xor_dev->lock);
-/* set the async tx descriptor */
-dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
-sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
-async_tx_ack(&sw_desc->async_tx);
return sw_desc;
}
@@ -389,6 +360,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
__func__, len, &src, &dest, flags);
sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+if (!sw_desc)
+return NULL;
sw_desc->async_tx.flags = flags;
@@ -443,6 +416,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
__func__, src_cnt, len, &dest, flags);
sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+if (!sw_desc)
+return NULL;
sw_desc->async_tx.flags = flags;
@@ -491,6 +466,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
container_of(chan, struct mv_xor_v2_device, dmachan);
sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
+if (!sw_desc)
+return NULL;
/* set the HW descriptor */
hw_descriptor = &sw_desc->hw_desc;
@@ -554,7 +531,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
{
struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
int pending_ptr, num_of_pending, i;
-struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
@@ -562,17 +538,10 @@ static void mv_xor_v2_tasklet(unsigned long data)
/* get the pending descriptors parameters */
num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
-/* next HW descriptor */
-next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
/* loop over free descriptors */
for (i = 0; i < num_of_pending; i++) {
-if (pending_ptr > MV_XOR_V2_DESC_NUM)
-pending_ptr = 0;
-if (next_pending_sw_desc != NULL)
-next_pending_hw_desc++;
+struct mv_xor_v2_descriptor *next_pending_hw_desc =
+xor_dev->hw_desq_virt + pending_ptr;
/* get the SW descriptor related to the HW descriptor */
next_pending_sw_desc =
@@ -608,15 +577,14 @@ static void mv_xor_v2_tasklet(unsigned long data)
/* increment the next descriptor */
pending_ptr++;
+if (pending_ptr >= MV_XOR_V2_DESC_NUM)
+pending_ptr = 0;
}
if (num_of_pending != 0) {
/* free the descriptors */
mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
}
-/* Update IMSG threshold, to enable new IMSG interrupts */
-mv_xor_v2_set_imsg_thrd(xor_dev, 0);
}
/*
@@ -648,9 +616,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
-/* enable the DMA engine */
-writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
/*
* This is a temporary solution, until we activate the
* SMMU. Set the attributes for reading & writing data buffers
@@ -694,6 +659,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
+/* enable the DMA engine */
+writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
return 0;
}
@@ -725,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, xor_dev);
+ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+if (ret)
+return ret;
xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
@@ -785,8 +757,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
/* add all SW descriptors to the free list */
for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
-xor_dev->sw_desq[i].idx = i;
-list_add(&xor_dev->sw_desq[i].free_list,
+struct mv_xor_v2_sw_desc *sw_desc =
+xor_dev->sw_desq + i;
+sw_desc->idx = i;
+dma_async_tx_descriptor_init(&sw_desc->async_tx,
+&xor_dev->dmachan);
+sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
+async_tx_ack(&sw_desc->async_tx);
+list_add(&sw_desc->free_list,
&xor_dev->free_sw_desc);
}
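
The tx_submit() rework above stops reading the hardware allocation pointer and instead keeps a software-owned write index, wrapped with a >= test so it can never address one slot past the end of the MV_XOR_V2_DESC_NUM-entry array (the old tasklet test used '>', which allowed exactly that off-by-one). The same ring-index pattern in isolation, as a sketch with hypothetical names and a stand-in size:

#define RING_SIZE 1024	/* stand-in for MV_XOR_V2_DESC_NUM */

struct ring {
	unsigned int write_idx;	/* next slot to fill; software-owned */
};

/* Claim the next slot, then advance and wrap the index.  The >=
 * test matters: with '>' the index would be handed out once with
 * the value RING_SIZE, one past the last valid slot. */
static unsigned int ring_claim_slot(struct ring *r)
{
	unsigned int slot = r->write_idx;

	if (++r->write_idx >= RING_SIZE)
		r->write_idx = 0;
	return slot;
}
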
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
@@ -3008,6 +3008,7 @@ static int pl330_remove(struct amba_device *adev)
for (i = 0; i < AMBA_NR_IRQS; i++) {
irq = adev->irq[i];
+if (irq)
devm_free_irq(&adev->dev, irq, pl330);
}
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
@@ -1287,6 +1287,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
if (desc->hwdescs.use) {
dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+if (dptr == 0)
+dptr = desc->nchunks;
+dptr--;
WARN_ON(dptr >= desc->nchunks);
} else {
running = desc->running;
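
The three added lines account for DPTR naming the next descriptor the hardware will fetch: a raw reading of 0 means the engine has just wrapped, so the chunk actually in flight is the last one. The arithmetic in isolation, as a sketch with simplified names rather than the driver's code:

/* Map the hardware's "next descriptor" index to the chunk that is
 * currently executing; nchunks is the total number of chunks in
 * the transfer. */
static unsigned int running_chunk(unsigned int dptr, unsigned int nchunks)
{
	if (dptr == 0)		/* next fetch wraps to chunk 0 ... */
		dptr = nchunks;	/* ... so the last chunk is running */
	return dptr - 1;	/* always < nchunks */
}
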
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
@@ -117,7 +117,7 @@ struct usb_dmac {
#define USB_DMASWR 0x0008
#define USB_DMASWR_SWR (1 << 0)
#define USB_DMAOR 0x0060
-#define USB_DMAOR_AE (1 << 2)
+#define USB_DMAOR_AE (1 << 1)
#define USB_DMAOR_DME (1 << 0)
#define USB_DMASAR 0x0000