/*
 * xor offload engine api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *      Dan Williams <dan.j.williams@intel.com>
 *
 *      with architecture considerations by:
 *      Neil Brown <neilb@suse.de>
 *      Jeff Garzik <jeff@garzik.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/raid/xor.h>
#include <linux/async_tx.h>

/* do_async_xor - dma map the pages and perform the xor with an engine.
 * 	This routine is marked __always_inline so it can be compiled away
 * 	when CONFIG_DMA_ENGINE=n
 */
static __always_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
	     unsigned int offset, int src_cnt, size_t len,
	     enum async_tx_flags flags,
	     struct dma_async_tx_descriptor *depend_tx,
	     dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_device *dma = chan->device;
	dma_addr_t *dma_src = (dma_addr_t *) src_list;
	struct dma_async_tx_descriptor *tx = NULL;
	int src_off = 0;
	int i;
	dma_async_tx_callback _cb_fn;
	void *_cb_param;
	enum async_tx_flags async_flags;
	enum dma_ctrl_flags dma_flags;
	int xor_src_cnt;
	dma_addr_t dma_dest;

	/* map the dest bidirectional in case it is re-used as a source */
	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
	for (i = 0; i < src_cnt; i++) {
		/* only map the dest once */
		if (unlikely(src_list[i] == dest)) {
			dma_src[i] = dma_dest;
			continue;
		}
		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
					  len, DMA_TO_DEVICE);
	}

	/* the engine can xor at most dma->max_xor sources per operation, so
	 * larger source counts are split into a chain of xors; each
	 * intermediate result is fed back in as a source of the next
	 */
	while (src_cnt) {
		async_flags = flags;
		dma_flags = 0;
		xor_src_cnt = min(src_cnt, dma->max_xor);
		/* if we are submitting additional xors, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffer mapped
		 */
		if (src_cnt > xor_src_cnt) {
			async_flags &= ~ASYNC_TX_ACK;
			dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
			_cb_fn = NULL;
			_cb_param = NULL;
		} else {
			_cb_fn = cb_fn;
			_cb_param = cb_param;
		}
		if (_cb_fn)
			dma_flags |= DMA_PREP_INTERRUPT;

		/* Since we have clobbered the src_list we are committed
		 * to doing this asynchronously.  Drivers force forward progress
		 * in case they cannot provide a descriptor
		 */
		tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
					      xor_src_cnt, len, dma_flags);

		if (unlikely(!tx))
			async_tx_quiesce(&depend_tx);

		/* spin wait for the preceding transactions to complete */
		while (unlikely(!tx)) {
			dma_async_issue_pending(chan);
			tx = dma->device_prep_dma_xor(chan, dma_dest,
						      &dma_src[src_off],
						      xor_src_cnt, len,
						      dma_flags);
		}

		async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
				_cb_param);

		depend_tx = tx;
		flags |= ASYNC_TX_DEP_ACK;

		if (src_cnt > xor_src_cnt) {
			/* drop completed sources */
			src_cnt -= xor_src_cnt;
			src_off += xor_src_cnt;

			/* use the intermediate result as a source */
			dma_src[--src_off] = dma_dest;
			src_cnt++;
		} else
			break;
	}

	return tx;
}

static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
	    int src_cnt, size_t len, enum async_tx_flags flags,
	    dma_async_tx_callback cb_fn, void *cb_param)
{
	int i;
	int xor_src_cnt;
	int src_off = 0;
	void *dest_buf;
	void **srcs = (void **) src_list;

	/* reuse the 'src_list' array to convert to buffer pointers */
	for (i = 0; i < src_cnt; i++)
		srcs[i] = page_address(src_list[i]) + offset;

	/* set destination address */
	dest_buf = page_address(dest) + offset;

	if (flags & ASYNC_TX_XOR_ZERO_DST)
		memset(dest_buf, 0, len);

	while (src_cnt > 0) {
		/* process up to 'MAX_XOR_BLOCKS' sources */
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest_buf, &srcs[src_off]);

		/* drop completed sources */
		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}

	async_tx_sync_epilog(cb_fn, cb_param);
}

/**
 * async_xor - attempt to xor a set of blocks with a dma engine.
 *	xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
 *	flag must be set to not include dest data in the calculation.  The
 *	assumption with dma engines is that they only use the destination
 *	buffer as a source when it is explicitly specified in the source list.
 * @dest: destination page
 * @src_list: array of source pages (if the dest is also a source it must be
 *	at index zero).  The contents of this array may be overwritten.
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST,
 *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
	int src_cnt, size_t len, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
						      &dest, 1, src_list,
						      src_cnt, len);
	BUG_ON(src_cnt <= 1);

	if (chan) {
		/* run the xor asynchronously */
		pr_debug("%s (async): len: %zu\n", __func__, len);

		return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
				    flags, depend_tx, cb_fn, cb_param);
	} else {
		/* run the xor synchronously */
		pr_debug("%s (sync): len: %zu\n", __func__, len);

		/* in the sync case the dest is an implied source
		 * (assumes the dest is the first source)
		 */
		if (flags & ASYNC_TX_XOR_DROP_DST) {
			src_cnt--;
			src_list++;
		}

		/* wait for any prerequisite operations */
		async_tx_quiesce(&depend_tx);

		do_sync_xor(dest, src_list, offset, src_cnt, len,
			    flags, cb_fn, cb_param);

		return NULL;
	}
}
EXPORT_SYMBOL_GPL(async_xor);
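
/*
 * Usage sketch (illustrative only, not part of this file): a RAID5-style
 * parity calculation lists the data pages as sources and xors them into the
 * parity page, then waits for completion.  The identifiers below (parity,
 * blocks, data_disks, cmp, complete_cb) are hypothetical caller state.
 *
 *	static void complete_cb(void *param)
 *	{
 *		complete(param);
 *	}
 *
 *	struct dma_async_tx_descriptor *tx;
 *	struct completion cmp;
 *
 *	init_completion(&cmp);
 *	tx = async_xor(parity, blocks, 0, data_disks, PAGE_SIZE,
 *		       ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
 *		       NULL, complete_cb, &cmp);
 *	async_tx_issue_pending_all();
 *	wait_for_completion(&cmp);
 */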

/* page_is_zero - check that a page region is entirely zero: the first word
 * must be zero and the buffer must equal itself shifted by four bytes
 */
static int page_is_zero(struct page *p, unsigned int offset, size_t len)
{
	char *a = page_address(p) + offset;
	return ((*(u32 *) a) == 0 &&
		memcmp(a, a + 4, len - 4) == 0);
}

/**
 * async_xor_zero_sum - attempt a xor parity check with a dma engine.
 * @dest: destination page used if the xor is performed synchronously
 * @src_list: array of source pages.  The dest page must be listed as a source
 * 	at index zero.  The contents of this array may be overwritten.
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_xor_zero_sum(struct page *dest, struct page **src_list,
	unsigned int offset, int src_cnt, size_t len,
	u32 *result, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
						      &dest, 1, src_list,
						      src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;

	BUG_ON(src_cnt <= 1);

	if (device && src_cnt <= device->max_xor) {
		dma_addr_t *dma_src = (dma_addr_t *) src_list;
		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
		int i;

		pr_debug("%s: (async) len: %zu\n", __func__, len);

		for (i = 0; i < src_cnt; i++)
			dma_src[i] = dma_map_page(device->dev, src_list[i],
						  offset, len, DMA_TO_DEVICE);

		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
						      len, result,
						      dma_prep_flags);
		if (unlikely(!tx)) {
			async_tx_quiesce(&depend_tx);

			while (!tx) {
				dma_async_issue_pending(chan);
				tx = device->device_prep_dma_zero_sum(chan,
					dma_src, src_cnt, len, result,
					dma_prep_flags);
			}
		}

		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		unsigned long xor_flags = flags;

		pr_debug("%s: (sync) len: %zu\n", __func__, len);

		/* dest is src_list[0], so drop it and xor the rest into dest */
		xor_flags |= ASYNC_TX_XOR_DROP_DST;
		xor_flags &= ~ASYNC_TX_ACK;

		tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
			depend_tx, NULL, NULL);

		async_tx_quiesce(&tx);

		*result = page_is_zero(dest, offset, len) ? 0 : 1;

		async_tx_sync_epilog(cb_fn, cb_param);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_xor_zero_sum);
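
/*
 * Usage sketch (illustrative only, not part of this file): a parity-check
 * path, e.g. a scrub, lists the parity page at src_list[0] followed by the
 * data pages and inspects 'result' once the operation has quiesced.  The
 * identifiers below (blocks, disks) are hypothetical caller state.
 *
 *	u32 result;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = async_xor_zero_sum(blocks[0], blocks, 0, disks, PAGE_SIZE,
 *				&result, ASYNC_TX_ACK, NULL, NULL, NULL);
 *	async_tx_quiesce(&tx);
 *	if (result)
 *		printk(KERN_ERR "parity mismatch\n");
 */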

static int __init async_xor_init(void)
{
	#ifdef CONFIG_DMA_ENGINE
	/* To conserve stack space the input src_list (array of page pointers)
	 * is reused to hold the array of dma addresses passed to the driver.
	 * This conversion is only possible when dma_addr_t is no larger than
	 * the size of a pointer.  HIGHMEM64G is known to violate this
	 * assumption.
	 */
	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
	#endif

	return 0;
}

static void __exit async_xor_exit(void)
{
	do { } while (0);
}

module_init(async_xor_init);
module_exit(async_xor_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
MODULE_LICENSE("GPL");