Commit 59da1f87 authored by Linus Torvalds

Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
  async_xor: dma_map destination DMA_BIDIRECTIONAL
  dmaengine: protect 'id' from concurrent registrations
  ioat: wait for self-test completion

@@ -53,10 +53,17 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	int xor_src_cnt;
 	dma_addr_t dma_dest;
 
-	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_FROM_DEVICE);
-	for (i = 0; i < src_cnt; i++)
+	/* map the dest bidrectional in case it is re-used as a source */
+	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
+	for (i = 0; i < src_cnt; i++) {
+		/* only map the dest once */
+		if (unlikely(src_list[i] == dest)) {
+			dma_src[i] = dma_dest;
+			continue;
+		}
 		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
 					  len, DMA_TO_DEVICE);
+	}
 
 	while (src_cnt) {
 		async_flags = flags;
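The hunk above changes how do_async_xor() maps its destination: an xor destination can also appear in the source list, so mapping it DMA_FROM_DEVICE under-declares how the device touches it, and the same page must not be mapped twice. As a standalone illustration of that mapping pattern (not part of the commit; my_map_xor() and its arguments are hypothetical names):

```c
/* Sketch only: my_map_xor() and its parameters are hypothetical. */
#include <linux/dma-mapping.h>

static int my_map_xor(struct device *dev, struct page *dest,
		      struct page **src_list, int src_cnt, size_t len,
		      dma_addr_t *dma_dest, dma_addr_t *dma_src)
{
	int i;

	/* the device may both read and write dest, so map it bidirectionally */
	*dma_dest = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_dest))
		return -ENOMEM;

	for (i = 0; i < src_cnt; i++) {
		/* a page that doubles as the dest is already mapped: reuse it */
		if (src_list[i] == dest) {
			dma_src[i] = *dma_dest;
			continue;
		}
		dma_src[i] = dma_map_page(dev, src_list[i], 0, len,
					  DMA_TO_DEVICE);
	}
	return 0;
}
```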

@@ -388,7 +388,10 @@ int dma_async_device_register(struct dma_device *device)
 
 	init_completion(&device->done);
 	kref_init(&device->refcount);
+
+	mutex_lock(&dma_list_mutex);
 	device->dev_id = id++;
+	mutex_unlock(&dma_list_mutex);
 
 	/* represent channels in sysfs. Probably want devs too */
 	list_for_each_entry(chan, &device->channels, device_node) {
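The 'id' here is a file-scope counter shared by every registering DMA device; without a lock, two concurrent dma_async_device_register() calls can read the same value before either increments it and end up with duplicate dev_id values. A minimal sketch of the pattern the fix applies (my_mutex, my_next_id and my_alloc_id are hypothetical names, not from the commit):

```c
/* Sketch only: names are hypothetical. */
#include <linux/mutex.h>

static DEFINE_MUTEX(my_mutex);
static int my_next_id;

static int my_alloc_id(void)
{
	int id;

	/* a bare 'my_next_id++' is a read-modify-write and can race */
	mutex_lock(&my_mutex);
	id = my_next_id++;
	mutex_unlock(&my_mutex);

	return id;
}
```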

@@ -1341,10 +1341,12 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
  */
 #define IOAT_TEST_SIZE 2000
 
+DECLARE_COMPLETION(test_completion);
 static void ioat_dma_test_callback(void *dma_async_param)
 {
 	printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
 		dma_async_param);
+	complete(&test_completion);
 }
 
 /**

@@ -1410,7 +1412,8 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
 		goto free_resources;
 	}
 	device->common.device_issue_pending(dma_chan);
-	msleep(1);
+
+	wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000));
 
 	if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
 					!= DMA_SUCCESS) {
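Together, the two ioat hunks replace a fixed msleep(1) with an event-driven wait: the self-test's DMA callback signals a completion, and the test waits on it with a three-second timeout, so the transfer is given time to finish instead of being checked after an arbitrary delay. A self-contained sketch of that completion pattern (my_done, my_dma_callback and my_wait_for_copy are hypothetical names, not from the commit):

```c
/* Sketch only: names are hypothetical. */
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_COMPLETION(my_done);

/* called by the DMA driver when the test transfer finishes */
static void my_dma_callback(void *param)
{
	complete(&my_done);
}

static int my_wait_for_copy(void)
{
	/* bounded, event-driven wait instead of a fixed msleep() */
	if (!wait_for_completion_timeout(&my_done, msecs_to_jiffies(3000)))
		return -ETIMEDOUT;

	return 0;
}
```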

@@ -85,18 +85,28 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
 		enum dma_ctrl_flags flags = desc->async_tx.flags;
 		u32 src_cnt;
 		dma_addr_t addr;
+		dma_addr_t dest;
 
+		src_cnt = unmap->unmap_src_cnt;
+		dest = iop_desc_get_dest_addr(unmap, iop_chan);
 		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-			addr = iop_desc_get_dest_addr(unmap, iop_chan);
-			dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+			enum dma_data_direction dir;
+
+			if (src_cnt > 1) /* is xor? */
+				dir = DMA_BIDIRECTIONAL;
+			else
+				dir = DMA_FROM_DEVICE;
+
+			dma_unmap_page(dev, dest, len, dir);
 		}
 
 		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			src_cnt = unmap->unmap_src_cnt;
 			while (src_cnt--) {
 				addr = iop_desc_get_src_addr(unmap,
 							     iop_chan,
 							     src_cnt);
+				if (addr == dest)
+					continue;
 				dma_unmap_page(dev, addr, len,
 					       DMA_TO_DEVICE);
 			}

@@ -311,17 +311,26 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 		enum dma_ctrl_flags flags = desc->async_tx.flags;
 		u32 src_cnt;
 		dma_addr_t addr;
+		dma_addr_t dest;
 
+		src_cnt = unmap->unmap_src_cnt;
+		dest = mv_desc_get_dest_addr(unmap);
 		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-			addr = mv_desc_get_dest_addr(unmap);
-			dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+			enum dma_data_direction dir;
+
+			if (src_cnt > 1) /* is xor ? */
+				dir = DMA_BIDIRECTIONAL;
+			else
+				dir = DMA_FROM_DEVICE;
+
+			dma_unmap_page(dev, dest, len, dir);
 		}
 
 		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-			src_cnt = unmap->unmap_src_cnt;
 			while (src_cnt--) {
 				addr = mv_desc_get_src_addr(unmap,
 							    src_cnt);
+				if (addr == dest)
					continue;
 				dma_unmap_page(dev, addr, len,
 					       DMA_TO_DEVICE);
 			}
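The iop-adma and mv_xor hunks are the unmap-side counterpart of the async_xor change: the destination of an xor is unmapped DMA_BIDIRECTIONAL, and any source that reused the destination's mapping is skipped so the page is not unmapped twice. Roughly, as a standalone sketch (my_unmap_xor() and its arguments are hypothetical names, not from the commit):

```c
/* Sketch only: my_unmap_xor() and its parameters are hypothetical. */
#include <linux/dma-mapping.h>

static void my_unmap_xor(struct device *dev, dma_addr_t dma_dest,
			 dma_addr_t *dma_src, int src_cnt, size_t len)
{
	/* an xor dest was mapped bidirectionally; a plain copy dest was not */
	enum dma_data_direction dir = src_cnt > 1 ? DMA_BIDIRECTIONAL
						  : DMA_FROM_DEVICE;

	dma_unmap_page(dev, dma_dest, len, dir);

	while (src_cnt--) {
		/* this source shared the dest mapping, already unmapped above */
		if (dma_src[src_cnt] == dma_dest)
			continue;
		dma_unmap_page(dev, dma_src[src_cnt], len, DMA_TO_DEVICE);
	}
}
```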