/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * Each client is responsible for keeping track of the channels it uses.  See
 * the definition of dma_event_callback in dmaengine.h.
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_get is done for each device registered.  When the
 * device is released, the corresponding kref_put is done in the release
 * method. Every time one of the device's channels is allocated to a client,
 * a kref_get occurs.  When the channel is freed, the corresponding kref_put
 * happens. The device's release function does a completion, so
 * unregister_device does a remove event, device_unregister, a kref_put
 * for the first reference, then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t.  A dma_chan_get is called when a client
 * signals that it wants to use a channel, and dma_chan_put is called when
 * a channel is removed or a client using it is unregistered.  A client can
 * take extra references per outstanding transaction, as is the case with
 * the NET DMA client.  The release function does a kref_put on the device.
 *	-ChrisL, DanW
 */
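
/*
 * A minimal sketch of the fast path described above (not part of this file;
 * the real helpers live in dmaengine.h and may differ in detail).  Roughly,
 * dma_chan_get() only falls back to the shared kref once a channel has been
 * marked slow_ref by dma_chan_release():
 *
 *	static inline void dma_chan_get(struct dma_chan *chan)
 *	{
 *		if (unlikely(chan->slow_ref))
 *			kref_get(&chan->refcount);
 *		else {
 *			local_inc(&(per_cpu_ptr(chan->local,
 *						 get_cpu())->refcount));
 *			put_cpu();
 *		}
 *	}
 *
 * The common case is therefore a lock-free per-cpu increment; only teardown
 * pays for the atomic kref.
 */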

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->memcpy_count;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->bytes_transferred;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	int in_use = 0;

	if (unlikely(chan->slow_ref) &&
		atomic_read(&chan->refcount.refcount) > 1)
		in_use = 1;
	else {
		if (local_read(&(per_cpu_ptr(chan->local,
			get_cpu())->refcount)) > 0)
			in_use = 1;
		put_cpu();
	}

	return sprintf(buf, "%d\n", in_use);
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_dev_release(struct device *dev)
{
	struct dma_chan *chan = to_dma_chan(dev);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= dma_dev_release,
};

/* --- client and device registration --- */

#define dma_chan_satisfies_mask(chan, mask) \
	__dma_chan_satisfies_mask((chan), &(mask))
static int
__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

/**
 * dma_client_chan_alloc - try to allocate channels to a client
 * @client: &dma_client
 *
 * Called with dma_list_mutex held.
 */
static void dma_client_chan_alloc(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	int desc;	/* allocated descriptor count */
	enum dma_state_client ack;

	/* Find a channel */
	list_for_each_entry(device, &dma_device_list, global_node) {
		/* Does the client require a specific DMA controller? */
		if (client->slave && client->slave->dma_dev
				&& client->slave->dma_dev != device->dev)
			continue;

		list_for_each_entry(chan, &device->channels, device_node) {
			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
				continue;

			desc = chan->device->device_alloc_chan_resources(
					chan, client);
			if (desc >= 0) {
				ack = client->event_callback(client,
						chan,
						DMA_RESOURCE_AVAILABLE);

				/* we are done once this client rejects
				 * an available resource
				 */
				if (ack == DMA_ACK) {
					dma_chan_get(chan);
					chan->client_count++;
				} else if (ack == DMA_NAK)
					return;
			}
		}
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
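
/*
 * Illustrative usage (a hedged sketch, not part of this file): a caller that
 * needs synchronous semantics can pair one of the async memcpy helpers below
 * with dma_sync_wait(), falling back to a CPU copy on failure.  "chan",
 * "dst", "src" and "len" are assumed to be supplied by the caller:
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (cookie < 0 || dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		memcpy(dst, src, len);
 */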

/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
	chan->device->device_free_chan_resources(chan);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

static void dma_chan_free_rcu(struct rcu_head *rcu)
{
	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
	int bias = 0x7FFFFFFF;
	int i;
	for_each_possible_cpu(i)
		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
	atomic_sub(bias, &chan->refcount.refcount);
	kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
	chan->slow_ref = 1;
	call_rcu(&chan->rcu, dma_chan_free_rcu);
}

/**
 * dma_clients_notify_available - broadcast available channels to the clients
 */
static void dma_clients_notify_available(void)
{
	struct dma_client *client;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node)
		dma_client_chan_alloc(client);

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_clients_notify_removed - tell the clients that a channel is going away
 * @chan: channel on its way out
 */
static void dma_clients_notify_removed(struct dma_chan *chan)
{
	struct dma_client *client;
	enum dma_state_client ack;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node) {
		ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

		/* client was holding resources for this channel so
		 * free it
		 */
		if (ack == DMA_ACK) {
			dma_chan_put(chan);
			chan->client_count--;
		}
	}

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_async_client_register - register a &dma_client
 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
 */
void dma_async_client_register(struct dma_client *client)
{
	/* validate client data */
	BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
		!client->slave);

	mutex_lock(&dma_list_mutex);
	list_add_tail(&client->global_node, &dma_client_list);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_register);
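
/*
 * Illustrative sketch of a client (hypothetical names, not part of this
 * file): a client declares the capabilities it wants, supplies an
 * event_callback, registers itself and then asks for channels.  "my_client"
 * and "my_event" are made up for the example:
 *
 *	static enum dma_state_client
 *	my_event(struct dma_client *client, struct dma_chan *chan,
 *		 enum dma_state state)
 *	{
 *		if (state == DMA_RESOURCE_AVAILABLE)
 *			return DMA_ACK;		(keep a reference on chan)
 *		return DMA_DUP;
 *	}
 *
 *	static struct dma_client my_client = {
 *		.event_callback	= my_event,
 *	};
 *
 *	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
 *	dma_async_client_register(&my_client);
 *	dma_async_client_chan_request(&my_client);
 */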

/**
 * dma_async_client_unregister - unregister a client and free the &dma_client
 * @client: &dma_client to free
 *
 * Force frees any allocated DMA channels and frees the &dma_client memory
 */
void dma_async_client_unregister(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	enum dma_state_client ack;

	if (!client)
		return;

	mutex_lock(&dma_list_mutex);
	/* free all channels the client is holding */
	list_for_each_entry(device, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node) {
			ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

			if (ack == DMA_ACK) {
				dma_chan_put(chan);
				chan->client_count--;
			}
		}

	list_del(&client->global_node);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_unregister);

/**
 * dma_async_client_chan_request - send all available channels that satisfy
 * the capability mask to the client
 * @client: requester
 */
void dma_async_client_chan_request(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	dma_client_chan_alloc(client);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_chan_request);

/**
 * dma_async_device_register - register a DMA device with the subsystem
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	static int id;
	int chancnt = 0, rc;
	struct dma_chan* chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
		!device->device_prep_dma_zero_sum);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_terminate_all);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	init_completion(&device->done);
	kref_init(&device->refcount);

	mutex_lock(&dma_list_mutex);
	device->dev_id = id++;
	mutex_unlock(&dma_list_mutex);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;

		chan->chan_id = chancnt++;
		chan->dev.class = &dma_devclass;
		chan->dev.parent = device->dev;
		dev_set_name(&chan->dev, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev);
		if (rc) {
			chancnt--;
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		/* One for the channel, one for the class device */
		kref_get(&device->refcount);
		kref_get(&device->refcount);
		kref_init(&chan->refcount);
		chan->client_count = 0;
		chan->slow_ref = 0;
		INIT_RCU_HEAD(&chan->rcu);
	}

	mutex_lock(&dma_list_mutex);
	list_add_tail(&device->global_node, &dma_device_list);
	mutex_unlock(&dma_list_mutex);

	dma_clients_notify_available();

	return 0;

err_out:
	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		kref_put(&device->refcount, dma_async_device_cleanup);
		device_unregister(&chan->dev);
		chancnt--;
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
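
/*
 * Illustrative sketch of a provider (hypothetical driver code, not part of
 * this file): a DMA driver fills in its capability mask and channel
 * operations, puts its channels on the ->channels list and then registers.
 * All "mydev_*" names and the "pdev" pointer are made up:
 *
 *	struct dma_device *dma = &mydev->common;
 *
 *	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
 *	dma->device_alloc_chan_resources = mydev_alloc_chan_resources;
 *	dma->device_free_chan_resources = mydev_free_chan_resources;
 *	dma->device_prep_dma_memcpy = mydev_prep_memcpy;
 *	dma->device_is_tx_complete = mydev_is_tx_complete;
 *	dma->device_issue_pending = mydev_issue_pending;
 *	dma->dev = &pdev->dev;
 *	INIT_LIST_HEAD(&dma->channels);
 *	(add each struct dma_chan to dma->channels here)
 *
 *	err = dma_async_device_register(dma);
 */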

/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
	struct dma_device *device;

	device = container_of(kref, struct dma_device, refcount);
	complete(&device->done);
}

/**
 * dma_async_device_unregister - unregisters DMA devices
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del(&device->global_node);
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		dma_clients_notify_removed(chan);
		device_unregister(&chan->dev);
		dma_chan_release(chan);
	}

	kref_put(&device->refcount, dma_async_device_cleanup);
	wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
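
/*
 * Illustrative usage (hedged sketch, not part of this file): the helper above
 * returns immediately with a cookie, so a caller can keep working and poll
 * for completion later.  "chan", "dst", "src" and "len" come from the caller:
 *
 *	dma_cookie_t cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *
 *	if (cookie >= 0) {
 *		dma_async_issue_pending(chan);
 *		while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
 *		       DMA_IN_PROGRESS)
 *			cpu_relax();
 *	}
 */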

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
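
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * providers embed a struct dma_async_tx_descriptor in their own descriptor,
 * initialize it with the helper above and hook up their submit routine.
 * "struct my_desc" and "my_tx_submit" are made-up names:
 *
 *	struct my_desc {
 *		struct dma_async_tx_descriptor txd;
 *		(hardware descriptor, list linkage, ...)
 *	};
 *
 *	dma_async_tx_descriptor_init(&desc->txd, chan);
 *	desc->txd.tx_submit = my_tx_submit;
 */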

static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);