/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it needs an exclusive channel it can call
 * dma_request_channel().  Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */
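
/*
 * Usage sketch (hypothetical client code, not part of this file): an
 * opportunistic offload client might look something like the following.
 * Error handling is elided, and "dst"/"src" are assumed to already be
 * dma_addr_t mappings.
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
 *							  len, 0);
 *		cookie = tx->tx_submit(tx);
 *		dma_async_issue_pending(chan);
 *		dma_sync_wait(chan, cookie);
 *	}
 *	dmaengine_put();
 */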

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

/**
 * dma_sync_wait - spin wait for a transaction to complete
 * @chan: channel to wait on
 * @cookie: transaction identifier to check status of
 */
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
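
/*
 * Example (hypothetical): dma_sync_wait() gives up after roughly five
 * seconds, so callers should treat anything other than DMA_COMPLETE as
 * a failure and fall back to the CPU.  "dst_cpu"/"src_cpu" below are
 * made-up names for CPU-addressable buffers.
 *
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		memcpy(dst_cpu, src_cpu, len);
 */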

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);
	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		err = dma_chan_get(chan);
		if (err)
			pr_debug("%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);
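
/*
 * Usage sketch (hypothetical provider code): DT-based drivers typically
 * call this from their of_dma translation callback once the phandle
 * arguments have been decoded to a specific channel.  "mydrv_lookup_chan"
 * is a made-up helper.
 *
 *	static struct dma_chan *mydrv_of_xlate(struct of_phandle_args *spec,
 *					       struct of_dma *ofdma)
 *	{
 *		struct dma_chan *chan = mydrv_lookup_chan(ofdma->of_dma_data,
 *							  spec);
 *
 *		return chan ? dma_get_slave_channel(chan) : NULL;
 *	}
 */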

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	int err;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = private_candidate(&mask, device, NULL, NULL);
	if (chan) {
		err = dma_chan_get(chan);
		if (err) {
			pr_debug("%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
		}
	}

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
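
/*
 * Usage sketch (hypothetical client code): "my_filter" and "my_dev" are
 * made-up names; a real filter usually matches on the channel's parent
 * device or driver-private data.  dma_request_channel() is the wrapper
 * macro around this function.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (chan) {
 *		...use the channel exclusively...
 *		dma_release_channel(chan);
 *	}
 */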

/**
 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
						  const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
	if (IS_ERR(ch))
		return NULL;
	return ch;
656 657 658
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
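
/*
 * Usage sketch (hypothetical peripheral driver): the "tx" channel name
 * and "cfg" are assumptions; a real consumer configures the channel
 * with dmaengine_slave_config() before preparing transfers.
 *
 *	chan = dma_request_slave_channel(&pdev->dev, "tx");
 *	if (!chan)
 *		return -ENODEV;		(or fall back to PIO)
 *	dmaengine_slave_config(chan, &cfg);
 *	...prepare and submit slave transfers...
 *	dma_release_channel(chan);
 */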

/**
 * dma_release_channel - release an exclusively allocated channel
 * @chan: channel obtained via dma_request_channel() or one of its wrappers
 */
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
				       __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;
751 752

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - register a DMA device with the subsystem
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
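
/*
 * Registration sketch (hypothetical provider driver): the mydrv_*
 * callbacks are made-up names.  A driver fills in its capability mask,
 * the mandatory callbacks checked above, and its channel list before
 * registering.
 *
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->device_prep_dma_memcpy	= mydrv_prep_memcpy;
 *	dd->device_alloc_chan_resources	= mydrv_alloc_chan_resources;
 *	dd->device_free_chan_resources	= mydrv_free_chan_resources;
 *	dd->device_tx_status		= mydrv_tx_status;
 *	dd->device_issue_pending	= mydrv_issue_pending;
 *	dd->dev				= &pdev->dev;
 *	INIT_LIST_HEAD(&dd->channels);
 *	list_add_tail(&chan->device_node, &dd->channels);
 *	rc = dma_async_device_register(dd);
 */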

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		if (p->pool)
			mempool_destroy(p->pool);
		p->pool = NULL;
		if (p->cache)
			kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
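
/*
 * Usage sketch (hypothetical offload code): mapping one source and one
 * destination page for a memcpy-style operation.  All names are
 * illustrative; note that dmaengine_get_unmap_data() already takes the
 * initial kref and sets ->map_cnt.
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return -ENOMEM;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src, src_off, len, DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst, dst_off, len, DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *	...submit the descriptor, taking extra krefs as needed...
 *	dmaengine_unmap_put(unmap);
 */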

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
