Commit 8010dad5 authored by Stephen Warren, committed by Vinod Koul

dma: add dma_get_any_slave_channel(), for use in of_xlate()

mmp_pdma.c implements a custom of_xlate() function that is 95% identical
to what Tegra will need. Create a function to implement the common part,
so everyone doesn't just cut/paste the implementation.

Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Lars-Peter Clausen <lars@metafoo.de>
Cc: dmaengine@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Parent 6ce4eac1
@@ -535,6 +535,34 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 }
 EXPORT_SYMBOL_GPL(dma_get_slave_channel);
 
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
+{
+	dma_cap_mask_t mask;
+	struct dma_chan *chan;
+	int err;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* lock against __dma_request_channel */
+	mutex_lock(&dma_list_mutex);
+
+	chan = private_candidate(&mask, device, NULL, NULL);
+	if (chan) {
+		err = dma_chan_get(chan);
+		if (err) {
+			pr_debug("%s: failed to get %s: (%d)\n",
+				 __func__, dma_chan_name(chan), err);
+			chan = NULL;
+		}
+	}
+
+	mutex_unlock(&dma_list_mutex);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
+
 /**
  * __dma_request_channel - try to allocate an exclusive channel
  * @mask: capabilities that the channel must satisfy
...
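As a point of reference (not part of this commit), here is a minimal sketch of how another controller driver, such as the Tegra one the commit message mentions, could wire the new helper into its own of_xlate() callback. The foo_* names are hypothetical; only dma_get_any_slave_channel(), of_dma_controller_register() and the standard of_xlate() prototype are assumed.

#include <linux/dmaengine.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>

/* Hypothetical per-controller state; only the embedded dma_device matters here. */
struct foo_dma_dev {
	struct dma_device dma_dev;
};

/* of_xlate(): hand out any currently unused DMA_SLAVE channel of this controller. */
static struct dma_chan *foo_dma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct foo_dma_dev *fdev = ofdma->of_dma_data;

	/* Decode dma_spec->args[] here if the binding carries a request line. */
	return dma_get_any_slave_channel(&fdev->dma_dev);
}

static int foo_dma_probe(struct platform_device *pdev)
{
	struct foo_dma_dev *fdev = platform_get_drvdata(pdev);

	/* dma_async_device_register(&fdev->dma_dev) is assumed to have succeeded. */

	/* Route DT phandle lookups through foo_dma_xlate(). */
	return of_dma_controller_register(pdev->dev.of_node, foo_dma_xlate, fdev);
}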
@@ -893,33 +893,17 @@ static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
 					   struct of_dma *ofdma)
 {
 	struct mmp_pdma_device *d = ofdma->of_dma_data;
-	struct dma_chan *chan, *candidate;
+	struct dma_chan *chan;
+	struct mmp_pdma_chan *c;
 
-retry:
-	candidate = NULL;
-
-	/* walk the list of channels registered with the current instance and
-	 * find one that is currently unused */
-	list_for_each_entry(chan, &d->device.channels, device_node)
-		if (chan->client_count == 0) {
-			candidate = chan;
-			break;
-		}
-
-	if (!candidate)
+	chan = dma_get_any_slave_channel(&d->device);
+	if (!chan)
 		return NULL;
 
-	/* dma_get_slave_channel will return NULL if we lost a race between
-	 * the lookup and the reservation */
-	chan = dma_get_slave_channel(candidate);
-
-	if (chan) {
-		struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
-		c->drcmr = dma_spec->args[0];
-		return chan;
-	}
+	c = to_mmp_pdma_chan(chan);
+	c->drcmr = dma_spec->args[0];
 
-	goto retry;
+	return chan;
 }
 
 static int mmp_pdma_probe(struct platform_device *op)
...
@@ -1079,6 +1079,7 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
 struct dma_chan *net_dma_find_channel(void);
 
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 #define dma_request_slave_channel_compat(mask, x, y, dev, name) \
...
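On the consumer side nothing changes: a slave device driver keeps requesting its channel by name, and the dmaengine core resolves the DT phandle through the controller's of_xlate() path shown above. A minimal, hypothetical sketch follows; the "rx" name is an assumption about the consumer's dma-names property.

#include <linux/device.h>
#include <linux/dmaengine.h>

static struct dma_chan *foo_client_get_rx_chan(struct device *dev)
{
	struct dma_chan *chan;

	/* Looks up "dmas"/"dma-names" in the device's DT node and ends up in
	 * the controller's of_xlate(), i.e. dma_get_any_slave_channel(). */
	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		dev_warn(dev, "no rx DMA channel, falling back to PIO\n");

	return chan;
}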