Commit 18536134 authored by Russell King

dmaengine: PL08x: convert to use vchan done list

Convert to use the virtual dma channel done list, tasklet, and
descriptor freeing.
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Parent 879f127b
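For readers new to the virt-dma helpers this patch adopts: the IRQ path marks a
finished descriptor with vchan_cookie_complete(), and the virt-dma core's own
tasklet later invokes the client callback and hands the descriptor to the
channel's desc_free hook, which is why the driver-private done_list and tasklet
below can be deleted. A minimal sketch of that contract, using hypothetical
foo_* names (only the vchan_* calls, vc->lock and the desc_free member are
taken from the diff itself), might look like this:

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "virt-dma.h"		/* drivers/dma/virt-dma.h */

struct foo_desc {
	struct virt_dma_desc vd;	/* must be embedded for container_of() */
	/* driver-private descriptor state would live here */
};

/* Called by the virt-dma core tasklet after the client callback has run. */
static void foo_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct foo_desc, vd));
}

/* What a driver's completion (IRQ) path does once descriptor @d is finished. */
static void foo_complete(struct virt_dma_chan *vc, struct foo_desc *d)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	/*
	 * Complete the cookie and move the descriptor onto the vchan done
	 * list; the core tasklet then runs the callback and foo_desc_free(),
	 * so no driver-private done_list or tasklet is needed.
	 */
	vchan_cookie_complete(&d->vd);
	spin_unlock_irqrestore(&vc->lock, flags);
}

static void foo_init_chan(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	vc->desc_free = foo_desc_free;	/* freeing hook used by the core */
	vchan_init(vc, dmadev);
}

Registered this way, one teardown routine (pl08x_desc_free() in the diff below)
serves the IRQ completion path, pl08x_free_txd_list() and the terminate path in
pl08x_control() alike.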
@@ -167,16 +167,16 @@ struct pl08x_sg {
 /**
  * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
  * @vd: virtual DMA descriptor
- * @node: node for txd list for channels
  * @dsg_list: list of children sg's
  * @llis_bus: DMA memory address (physical) start for the LLIs
  * @llis_va: virtual memory address start for the LLIs
  * @cctl: control reg values for current txd
  * @ccfg: config reg values for current txd
+ * @done: this marks completed descriptors, which should not have their
+ *	  mux released.
  */
 struct pl08x_txd {
 	struct virt_dma_desc vd;
-	struct list_head node;
 	struct list_head dsg_list;
 	dma_addr_t llis_bus;
 	struct pl08x_lli *llis_va;
@@ -187,6 +187,7 @@ struct pl08x_txd {
 	 * trigger this txd.  Other registers are in llis_va[0].
 	 */
 	u32 ccfg;
+	bool done;
 };
 
 /**
@@ -211,11 +212,9 @@ enum pl08x_dma_chan_state {
  * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
  * @vc: wrappped virtual channel
  * @phychan: the physical channel utilized by this channel, if there is one
- * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
  * @name: name of channel
  * @cd: channel platform data
  * @runtime_addr: address for RX/TX according to the runtime config
- * @done_list: list of completed transactions
  * @at: active transaction on this channel
  * @lock: a lock for this channel data
  * @host: a pointer to the host (internal use)
@@ -227,11 +226,9 @@ enum pl08x_dma_chan_state {
 struct pl08x_dma_chan {
 	struct virt_dma_chan vc;
 	struct pl08x_phy_chan *phychan;
-	struct tasklet_struct tasklet;
 	const char *name;
 	const struct pl08x_channel_data *cd;
 	struct dma_slave_config cfg;
-	struct list_head done_list;
 	struct pl08x_txd *at;
 	struct pl08x_driver_data *host;
 	enum pl08x_dma_chan_state state;
@@ -1084,6 +1081,52 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 	kfree(txd);
 }
 
+static void pl08x_unmap_buffers(struct pl08x_txd *txd)
+{
+	struct device *dev = txd->vd.tx.chan->device->dev;
+	struct pl08x_sg *dsg;
+
+	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		else {
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		}
+	}
+	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
+		else
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
+	}
+}
+
+static void pl08x_desc_free(struct virt_dma_desc *vd)
+{
+	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
+	struct pl08x_driver_data *pl08x = plchan->host;
+	unsigned long flags;
+
+	if (!plchan->slave)
+		pl08x_unmap_buffers(txd);
+
+	if (!txd->done)
+		pl08x_release_mux(plchan);
+
+	spin_lock_irqsave(&pl08x->lock, flags);
+	pl08x_free_txd(plchan->host, txd);
+	spin_unlock_irqrestore(&pl08x->lock, flags);
+}
+
 static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 				struct pl08x_dma_chan *plchan)
 {
@@ -1094,9 +1137,8 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 	while (!list_empty(&head)) {
 		txd = list_first_entry(&head, struct pl08x_txd, vd.node);
-		pl08x_release_mux(plchan);
 		list_del(&txd->vd.node);
-		pl08x_free_txd(pl08x, txd);
+		pl08x_desc_free(&txd->vd);
 	}
 }
@@ -1541,9 +1583,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	}
 	/* Dequeue jobs and free LLIs */
 	if (plchan->at) {
-		/* Killing this one off, release its mux */
-		pl08x_release_mux(plchan);
-		pl08x_free_txd(pl08x, plchan->at);
+		pl08x_desc_free(&plchan->at->vd);
 		plchan->at = NULL;
 	}
 	/* Dequeue jobs not yet fired as well */
@@ -1600,68 +1640,6 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
 	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
 }
 
-static void pl08x_unmap_buffers(struct pl08x_txd *txd)
-{
-	struct device *dev = txd->vd.tx.chan->device->dev;
-	struct pl08x_sg *dsg;
-
-	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_single(dev, dsg->src_addr, dsg->len,
-						DMA_TO_DEVICE);
-		else {
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_page(dev, dsg->src_addr, dsg->len,
-						DMA_TO_DEVICE);
-		}
-	}
-	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
-						DMA_FROM_DEVICE);
-		else
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
-						DMA_FROM_DEVICE);
-	}
-}
-
-static void pl08x_tasklet(unsigned long data)
-{
-	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
-	struct pl08x_driver_data *pl08x = plchan->host;
-	unsigned long flags;
-	LIST_HEAD(head);
-
-	spin_lock_irqsave(&plchan->vc.lock, flags);
-	list_splice_tail_init(&plchan->done_list, &head);
-	spin_unlock_irqrestore(&plchan->vc.lock, flags);
-
-	while (!list_empty(&head)) {
-		struct pl08x_txd *txd = list_first_entry(&head,
-				struct pl08x_txd, node);
-		dma_async_tx_callback callback = txd->vd.tx.callback;
-		void *callback_param = txd->vd.tx.callback_param;
-
-		list_del(&txd->node);
-
-		/* Don't try to unmap buffers on slave channels */
-		if (!plchan->slave)
-			pl08x_unmap_buffers(txd);
-
-		/* Free the descriptor */
-		spin_lock_irqsave(&plchan->vc.lock, flags);
-		pl08x_free_txd(pl08x, txd);
-		spin_unlock_irqrestore(&plchan->vc.lock, flags);
-
-		/* Callback to signal completion */
-		if (callback)
-			callback(callback_param);
-	}
-}
-
 static irqreturn_t pl08x_irq(int irq, void *dev)
 {
 	struct pl08x_driver_data *pl08x = dev;
@@ -1704,8 +1682,8 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 			 * reservation.
 			 */
 			pl08x_release_mux(plchan);
-			dma_cookie_complete(&tx->vd.tx);
-			list_add_tail(&tx->node, &plchan->done_list);
+			tx->done = true;
+			vchan_cookie_complete(&tx->vd);
 
 			/*
 			 * And start the next descriptor (if any),
@@ -1718,8 +1696,6 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
 			}
 			spin_unlock(&plchan->vc.lock);
 
-			/* Schedule tasklet on this channel */
-			tasklet_schedule(&plchan->tasklet);
 			mask |= (1 << i);
 		}
 	}
@@ -1779,10 +1755,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
 			 "initialize virtual channel \"%s\"\n",
 			 chan->name);
 
-		INIT_LIST_HEAD(&chan->done_list);
-		tasklet_init(&chan->tasklet, pl08x_tasklet,
-			     (unsigned long) chan);
+		chan->vc.desc_free = pl08x_desc_free;
 		vchan_init(&chan->vc, dmadev);
 	}
 	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",