Commit e4e436e0 authored by Linus Torvalds

Merge branch 'fixes' of git://git.infradead.org/users/vkoul/slave-dma

* 'fixes' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine/ste_dma40: fix memory leak due to prepared descriptors
  dmaengine/ste_dma40: fix Oops due to double free of client descriptor
  dmaengine/ste_dma40: remove duplicate call to d40_pool_lli_free().
  dmaengine/ste_dma40: add missing kernel doc for pending_queue
@@ -174,8 +174,10 @@ struct d40_base;
  * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
  * transfer and call client callback.
  * @client: Cliented owned descriptor list.
+ * @pending_queue: Submitted jobs, to be issued by issue_pending()
  * @active: Active descriptor.
  * @queue: Queued jobs.
+ * @prepare_queue: Prepared jobs.
  * @dma_cfg: The client configuration of this dma channel.
  * @configured: whether the dma_cfg configuration is valid
  * @base: Pointer to the device instance struct.
@@ -203,6 +205,7 @@ struct d40_chan {
         struct list_head pending_queue;
         struct list_head active;
         struct list_head queue;
+        struct list_head prepare_queue;
         struct stedma40_chan_cfg dma_cfg;
         bool configured;
         struct d40_base *base;
@@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
                 list_for_each_entry_safe(d, _d, &d40c->client, node)
                         if (async_tx_test_ack(&d->txd)) {
-                                d40_pool_lli_free(d40c, d);
                                 d40_desc_remove(d);
                                 desc = d;
                                 memset(desc, 0, sizeof(*desc));
@@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
         return d;
 }

+/* remove desc from current queue and add it to the pending_queue */
 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
 {
+        d40_desc_remove(desc);
+        desc->is_in_client_list = false;
         list_add_tail(&desc->node, &d40c->pending_queue);
 }
@@ -803,6 +808,7 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
 static void d40_term_all(struct d40_chan *d40c)
 {
         struct d40_desc *d40d;
+        struct d40_desc *_d;

         /* Release active descriptors */
         while ((d40d = d40_first_active_get(d40c))) {
@@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c)
                 d40_desc_free(d40c, d40d);
         }

+        /* Release client owned descriptors */
+        if (!list_empty(&d40c->client))
+                list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
+                        d40_desc_remove(d40d);
+                        d40_desc_free(d40c, d40d);
+                }
+
+        /* Release descriptors in prepare queue */
+        if (!list_empty(&d40c->prepare_queue))
+                list_for_each_entry_safe(d40d, _d,
+                                         &d40c->prepare_queue, node) {
+                        d40_desc_remove(d40d);
+                        d40_desc_free(d40c, d40d);
+                }
+
         d40c->pending_tx = 0;
         d40c->busy = false;
 }
@@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data)
         if (!d40d->cyclic) {
                 if (async_tx_test_ack(&d40d->txd)) {
-                        d40_pool_lli_free(d40c, d40d);
                         d40_desc_remove(d40d);
                         d40_desc_free(d40c, d40d);
                 } else {
@@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)
         u32 event;
         struct d40_phy_res *phy = d40c->phy_chan;
         bool is_src;
-        struct d40_desc *d;
-        struct d40_desc *_d;

         /* Terminate all queued and active transfers */
         d40_term_all(d40c);

-        /* Release client owned descriptors */
-        if (!list_empty(&d40c->client))
-                list_for_each_entry_safe(d, _d, &d40c->client, node) {
-                        d40_pool_lli_free(d40c, d);
-                        d40_desc_remove(d);
-                        d40_desc_free(d40c, d);
-                }
-
         if (phy == NULL) {
                 chan_err(d40c, "phy == null\n");
                 return -EINVAL;
@@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
                 goto err;
         }

+        /*
+         * add descriptor to the prepare queue in order to be able
+         * to free them later in terminate_all
+         */
+        list_add_tail(&desc->node, &chan->prepare_queue);
+
         spin_unlock_irqrestore(&chan->lock, flags);

         return &desc->txd;
@@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
                 INIT_LIST_HEAD(&d40c->queue);
                 INIT_LIST_HEAD(&d40c->pending_queue);
                 INIT_LIST_HEAD(&d40c->client);
+                INIT_LIST_HEAD(&d40c->prepare_queue);

                 tasklet_init(&d40c->tasklet, dma_tasklet,
                              (unsigned long) d40c);
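The common thread of the d40_prep_sg() and d40_term_all() changes above is that every descriptor handed out by a prep call is also tracked on a per-channel prepare_queue, so a later terminate_all can reclaim descriptors the client prepared but never submitted. The sketch below is a minimal, self-contained userspace illustration of that pattern, not the driver code; the names (dma_chan, dma_desc, prep_desc, terminate_all) and the tiny list implementation are assumptions made for the example, loosely mirroring the kernel's struct list_head API.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for the kernel's intrusive doubly linked list. */
struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h; h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

static void list_del(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->prev = n->next = n;
}

/* Hypothetical descriptor and channel, not the driver's d40_desc/d40_chan. */
struct dma_desc {
        int id;
        struct list_head node;
};

struct dma_chan {
        struct list_head prepare_queue;
};

/* A prep call allocates a descriptor and remembers it on prepare_queue,
 * so it can be reclaimed even if the client never submits it. */
static struct dma_desc *prep_desc(struct dma_chan *c, int id)
{
        struct dma_desc *d = calloc(1, sizeof(*d));

        if (!d)
                return NULL;
        d->id = id;
        list_add_tail(&d->node, &c->prepare_queue);
        return d;
}

/* terminate_all() drains prepare_queue with a "safe" walk: the next
 * pointer is saved before the current entry is unlinked and freed. */
static void terminate_all(struct dma_chan *c)
{
        struct list_head *pos = c->prepare_queue.next;

        while (pos != &c->prepare_queue) {
                struct list_head *next = pos->next;   /* save before freeing */
                struct dma_desc *d = (struct dma_desc *)
                        ((char *)pos - offsetof(struct dma_desc, node));

                list_del(&d->node);
                printf("freeing prepared descriptor %d\n", d->id);
                free(d);
                pos = next;
        }
}

int main(void)
{
        struct dma_chan chan;

        INIT_LIST_HEAD(&chan.prepare_queue);
        prep_desc(&chan, 1);    /* prepared but never issued */
        prep_desc(&chan, 2);
        terminate_all(&chan);   /* without prepare_queue these would leak */
        return 0;
}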