Commit a7c57cf7 authored by Linus Walleij, committed by Vinod Koul

dmaengine/dw_dmac: implement pause and resume in dwc_control

Some peripherals, such as amba-pl011, need pause to be implemented in the DMA
controller drivers. This change also makes dwc_tx_status() return the correct
status when the channel is paused.
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Viresh Kumar <viresh.kumar@st.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Parent 69cea5a0
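
For context, this is roughly how a client driver such as amba-pl011 would exercise the new commands through the dmaengine control hook of this kernel generation. The helper below is a minimal illustrative sketch, not code from this commit; the function name and error handling are assumptions.

#include <linux/dmaengine.h>

/*
 * Illustrative sketch only: pause and resume a channel through the
 * device_control() hook of this era. 'chan' is assumed to have been
 * obtained with dma_request_channel(); the helper name is hypothetical.
 */
static int example_pause_resume(struct dma_chan *chan)
{
	int ret;

	/* Ask the controller driver (e.g. dw_dmac) to suspend the channel */
	ret = chan->device->device_control(chan, DMA_PAUSE, 0);
	if (ret)
		return ret;

	/* ... inspect residue or drain the peripheral FIFO here ... */

	/* Clear the suspend state and let the transfer continue */
	return chan->device->device_control(chan, DMA_RESUME, 0);
}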
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -862,34 +862,50 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct dw_dma		*dw = to_dw_dma(chan->device);
 	struct dw_desc		*desc, *_desc;
 	unsigned long		flags;
+	u32			cfglo;
 	LIST_HEAD(list);
 
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
+	if (cmd == DMA_PAUSE) {
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/*
-	 * This is only called when something went wrong elsewhere, so
-	 * we don't really care about the data. Just disable the
-	 * channel. We still have to poll the channel enable bit due
-	 * to AHB/HSB limitations.
-	 */
-	spin_lock_irqsave(&dwc->lock, flags);
+		cfglo = channel_readl(dwc, CFG_LO);
+		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+			cpu_relax();
 
-	channel_clear_bit(dw, CH_EN, dwc->mask);
+		dwc->paused = true;
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	} else if (cmd == DMA_RESUME) {
+		if (!dwc->paused)
+			return 0;
 
-	while (dma_readl(dw, CH_EN) & dwc->mask)
-		cpu_relax();
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/* active_list entries will end up before queued entries */
-	list_splice_init(&dwc->queue, &list);
-	list_splice_init(&dwc->active_list, &list);
+		cfglo = channel_readl(dwc, CFG_LO);
+		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+		dwc->paused = false;
 
-	spin_unlock_irqrestore(&dwc->lock, flags);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	} else if (cmd == DMA_TERMINATE_ALL) {
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/* Flush all pending and queued descriptors */
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		dwc_descriptor_complete(dwc, desc, false);
+		channel_clear_bit(dw, CH_EN, dwc->mask);
+		while (dma_readl(dw, CH_EN) & dwc->mask)
+			cpu_relax();
+
+		dwc->paused = false;
+
+		/* active_list entries will end up before queued entries */
+		list_splice_init(&dwc->queue, &list);
+		list_splice_init(&dwc->active_list, &list);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+
+		/* Flush all pending and queued descriptors */
+		list_for_each_entry_safe(desc, _desc, &list, desc_node)
+			dwc_descriptor_complete(dwc, desc, false);
+	} else
+		return -ENXIO;
 
 	return 0;
 }
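
The DMA_PAUSE branch above sets the channel-suspend bit and then polls until the channel FIFO has drained, so no in-flight data is lost across a pause. For reference, the two CFG_LO bits it relies on are defined in dw_dmac_regs.h, following the DesignWare AHB DMAC register layout:

#define DWC_CFGL_CH_SUSP	(1 << 8)	/* pause xfer */
#define DWC_CFGL_FIFO_EMPTY	(1 << 9)	/* pause xfer */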
@@ -923,6 +939,9 @@ dwc_tx_status(struct dma_chan *chan,
 	else
 		dma_set_tx_state(txstate, last_complete, last_used, 0);
 
+	if (dwc->paused)
+		return DMA_PAUSED;
+
 	return ret;
 }
 
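
With the new flag in place, dwc_tx_status() reports DMA_PAUSED to callers. Below is a minimal sketch of the caller side, assuming a channel and a cookie returned by tx_submit(); the helper name is hypothetical, not part of this commit.

#include <linux/dmaengine.h>

/*
 * Illustrative sketch only: query whether the transfer behind 'cookie'
 * is currently paused. The helper name is hypothetical.
 */
static bool example_is_paused(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	return chan->device->device_tx_status(chan, cookie, &state) ==
		DMA_PAUSED;
}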
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -138,6 +138,7 @@ struct dw_dma_chan {
 	void __iomem		*ch_regs;
 	u8			mask;
 	u8			priority;
+	bool			paused;
 
 	spinlock_t		lock;
 