diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 64b048d7fba751de8f5faf7b73062ea104aeea15..3a8047b1f108aba1bd43e5d44ce8466f12ac6497 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -242,6 +242,13 @@ static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
 		dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
 }
 
+static unsigned int min_odd(unsigned int x, unsigned int y)
+{
+	unsigned int val = min(x, y);
+
+	return val % 2 ? val : val - 1;
+}
+
 /*
  * This function repeatedly tests DMA transfers of various lengths and
  * offsets for a given operation type until it is told to exit by
@@ -262,6 +269,7 @@ static int dmatest_func(void *data)
 	struct dmatest_thread	*thread = data;
 	struct dmatest_done	done = { .wait = &done_wait };
 	struct dma_chan		*chan;
+	struct dma_device	*dev;
 	const char		*thread_name;
 	unsigned int		src_off, dst_off, len;
 	unsigned int		error_count;
@@ -283,13 +291,16 @@ static int dmatest_func(void *data)
 
 	smp_rmb();
 	chan = thread->chan;
+	dev = chan->device;
 	if (thread->type == DMA_MEMCPY)
 		src_cnt = dst_cnt = 1;
 	else if (thread->type == DMA_XOR) {
-		src_cnt = xor_sources | 1; /* force odd to ensure dst = src */
+		/* force odd to ensure dst = src */
+		src_cnt = min_odd(xor_sources | 1, dev->max_xor);
 		dst_cnt = 1;
 	} else if (thread->type == DMA_PQ) {
-		src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
+		/* force odd to ensure dst = src */
+		src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0));
 		dst_cnt = 2;
 		for (i = 0; i < src_cnt; i++)
 			pq_coefs[i] = 1;
@@ -327,7 +338,6 @@ static int dmatest_func(void *data)
 
 	while (!kthread_should_stop()
 	       && !(iterations && total_tests >= iterations)) {
-		struct dma_device *dev = chan->device;
 		struct dma_async_tx_descriptor *tx = NULL;
 		dma_addr_t	dma_srcs[src_cnt];
 		dma_addr_t	dma_dsts[dst_cnt];
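
For reference, a minimal standalone sketch of the clamping behaviour the patch introduces: the requested source count is forced odd, then capped at the device limit while staying odd. The xor_sources and max_xor values below are hypothetical, and min() is open-coded since the kernel macro is not available in userspace.

	/* Illustration only, not part of the patch. */
	#include <stdio.h>

	static unsigned int min_odd(unsigned int x, unsigned int y)
	{
		unsigned int val = x < y ? x : y;	/* open-coded min() */

		return val % 2 ? val : val - 1;
	}

	int main(void)
	{
		unsigned int xor_sources = 12;	/* hypothetical module parameter */
		unsigned int max_xor = 8;	/* hypothetical device capability */

		/* 12 | 1 = 13, capped at 8, then forced odd -> 7 */
		printf("src_cnt = %u\n", min_odd(xor_sources | 1, max_xor));
		return 0;
	}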