/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */

/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

struct edma_pset {
	u32				len;
	dma_addr_t			addr;
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	enum dma_transfer_direction	direction;
	int				cyclic;
	int				absync;
	int				pset_nr;
	struct edma_chan		*echan;
	int				processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 * so far to cover accounting. This is updated directly to processed
	 * during edma_callback and is always <= processed, because processed
	 * refers to the number of pending transfers (programmed to the EDMA
	 * controller), whereas processed_stat tracks the number of transfers
	 * accounted for so far.
	 *
	 * - residue: The amount of bytes we have left to transfer for this desc
	 *
	 * - residue_stat: The residue in bytes of data we have covered
	 * so far for accounting. This is updated directly to residue
	 * during callbacks to keep it current.
	 *
	 * - sg_len: Tracks the length of the current intermediate transfer,
	 * this is required to update the residue during intermediate transfer
	 * completion callback.
	 */
	int				processed_stat;
	u32				sg_len;
	u32				residue;
	u32				residue_stat;

	struct edma_pset		pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};

struct edma_cc {
	int				ctlr;
	struct dma_device		dma_slave;
	struct edma_chan		slave_chans[EDMA_CHANS];
	int				num_slave_chans;
	int				dummy_slot;
};

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	if (!echan->edesc) {
		/* Setup is needed for the first transfer */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many PaRAM sets are left to process */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(echan->vchan.chan.device->dev,
			"\n pset[%d]:\n"
			"  chnum\t%d\n"
			"  slot\t%d\n"
			"  opt\t%08x\n"
			"  src\t%08x\n"
			"  dst\t%08x\n"
			"  abcnt\t%08x\n"
			"  ccnt\t%08x\n"
			"  bidx\t%08x\n"
			"  cidx\t%08x\n"
			"  lkrld\t%08x\n",
			j, echan->ch_num, echan->slot[i],
			edesc->pset[j].param.opt,
			edesc->pset[j].param.src,
			edesc->pset[j].param.dst,
			edesc->pset[j].param.a_b_cnt,
			edesc->pset[j].param.ccnt,
			edesc->pset[j].param.src_dst_bidx,
			edesc->pset[j].param.src_dst_cidx,
			edesc->pset[j].param.link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(echan->slot[i], echan->slot[i+1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a set of SG-list transactions, set up
	 * a link to the dummy slot. This results in all future events being
	 * absorbed, which is OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(echan->slot[nslots-1], echan->slot[1]);
		else
			edma_link(echan->slot[nslots-1],
				  echan->ecc->dummy_slot);
	}

	if (echan->missed) {
		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up into
		 * transfers of MAX_NR_SG
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(echan->ch_num);
		edma_stop(echan->ch_num);
		edma_start(echan->ch_num);
		edma_trigger_channel(echan->ch_num);
		echan->missed = 0;
	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(echan->ch_num);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan->ch_num);
	}
}

static int edma_terminate_all(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_dma() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		edma_stop(echan->ch_num);
		/* Move the cyclic channel back to default queue */
		if (echan->edesc->cyclic)
			edma_assign_channel_eventq(echan->ch_num,
						   EVENTQ_DEFAULT);
		/*
		 * free the running request descriptor
		 * since it is not in any of the vdesc lists
		 */
		edma_desc_free(&echan->edesc->vdesc);
		echan->edesc = NULL;
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}

static int edma_slave_config(struct dma_chan *chan,
	struct dma_slave_config *cfg)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
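
/*
 * Illustrative client-side sketch (not part of this driver): a peripheral
 * driver would typically fill a struct dma_slave_config and hand it to
 * dmaengine_slave_config() before preparing transfers. The FIFO address
 * and widths below are hypothetical.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */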

static int edma_dma_pause(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (!echan->edesc)
		return -EINVAL;

	edma_pause(echan->ch_num);
	return 0;
}

static int edma_dma_resume(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	edma_resume(echan->ch_num);
	return 0;
}

/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and set up
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of dev_width, how much to send
 * @dev_width: Width of a single device access, in bytes
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
	dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
	enum dma_slave_buswidth dev_width, unsigned int dma_length,
	enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int acnt, bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	acnt = dev_width;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of:
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: In A-sync transfer only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, the approach taken is: bcnt for the
		 * first frame will be the remainder below. Then for
		 * every successive frame, bcnt will be SZ_64K - 1. This
		 * is assured as bcntrld = 0xffff at the end of the function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}
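
	/*
	 * Worked example (illustrative numbers only, not from the hardware
	 * spec): with acnt = 4 bytes and dma_length = 262144 bytes, the
	 * A-sync branch gives dma_length / acnt = 65536 words, so
	 * ccnt = 65536 / 65535 = 1 and bcnt = 65536 - 65535 = 1; since
	 * bcnt != 0, ccnt becomes 2. The first frame moves 1 word and the
	 * bcnt reload value of SZ_64K - 1 (set at the end of this function)
	 * covers the remaining 65535 words. With acnt = 4 and burst = 8,
	 * the AB-sync branch moves 32-byte frames, so dma_length = 3200
	 * bytes yields ccnt = 100.
	 */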

	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
	/*
	 * Auto reload (bcntrld) is required only in the A-sync case, and
	 * there the reload value needed is always SZ_64K - 1. 'link' is
	 * initially set to the null link and is populated later by
	 * edma_execute.
	 */
	param->link_bcntrld = 0xffffffff;
	return absync;
}

static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		/*
		 * If this is the last in a current SG set of transactions,
		 * enable interrupts so that next set is processed
		 */
		if (!((i+1) % MAX_NR_SG))
			edesc->pset[i].param.opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].param.opt |= TCINTEN;
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);

	if (unlikely(!echan || !len))
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = 1;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	/*
	 * Enable intermediate transfer chaining to re-trigger channel
	 * on completion of every TR, and enable transfer-completion
	 * interrupt on completion of the whole transfer.
	 */
	edesc->pset[0].param.opt |= ITCCHEN;
	edesc->pset[0].param.opt |= TCINTEN;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Period should be multiple of Buffer length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			"\n pset[%d]:\n"
			"  chnum\t%d\n"
			"  slot\t%d\n"
			"  opt\t%08x\n"
			"  src\t%08x\n"
			"  dst\t%08x\n"
			"  abcnt\t%08x\n"
			"  ccnt\t%08x\n"
			"  bidx\t%08x\n"
			"  cidx\t%08x\n"
			"  lkrld\t%08x\n",
			i, echan->ch_num, echan->slot[i],
			edesc->pset[i].param.opt,
			edesc->pset[i].param.src,
			edesc->pset[i].param.dst,
			edesc->pset[i].param.a_b_cnt,
			edesc->pset[i].param.ccnt,
			edesc->pset[i].param.src_dst_bidx,
			edesc->pset[i].param.src_dst_cidx,
			edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/* Enable period interrupt only if it is requested */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[i].param.opt |= TCINTEN;
	}

	/* Place the cyclic channel to highest priority queue */
	edma_assign_channel_eventq(echan->ch_num, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	struct edmacc_param p;

	edesc = echan->edesc;

	spin_lock(&echan->vchan.lock);
	switch (ch_status) {
	case EDMA_DMA_COMPLETE:
		if (edesc) {
			if (edesc->cyclic) {
				vchan_cyclic_callback(&edesc->vdesc);
				goto out;
			} else if (edesc->processed == edesc->pset_nr) {
				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
				edesc->residue = 0;
				edma_stop(echan->ch_num);
				vchan_cookie_complete(&edesc->vdesc);
				echan->edesc = NULL;
			} else {
				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);

				edma_pause(echan->ch_num);

				/* Update statistics for tx_status */
				edesc->residue -= edesc->sg_len;
				edesc->residue_stat = edesc->residue;
				edesc->processed_stat = edesc->processed;
			}
			edma_execute(echan);
		}
		break;
	case EDMA_DMA_CC_ERROR:
		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);

		/*
		 * Issue later based on missed flag which will be sure
		 * to happen as:
		 * (1) we finished transmitting an intermediate slot and
		 *     edma_execute is coming up.
		 * (2) or we finished current transfer and issue will
		 *     call edma_execute.
		 *
		 * Important note: issuing can be dangerous here and
		 * lead to some nasty recursion when we are in a NULL
		 * slot. So we avoid doing so and set the missed flag.
		 */
		if (p.a_b_cnt == 0 && p.ccnt == 0) {
			dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
			echan->missed = 1;
		} else {
			/*
			 * The slot is already programmed but the event got
			 * missed, so it's safe to issue it here.
			 */
			dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
			edma_clean_channel(echan->ch_num);
			edma_stop(echan->ch_num);
			edma_start(echan->ch_num);
			edma_trigger_channel(echan->ch_num);
		}
		break;
	default:
		break;
	}
out:
	spin_unlock(&echan->vchan.lock);
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;
	LIST_HEAD(descs);

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
					echan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}

static u32 edma_residue(struct edma_desc *edesc)
{
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos;
	int i;

	/*
	 * We always read the dst/src position from the first PaRAM
	 * pset. That's the one which is active now.
	 */
	pos = edma_get_position(edesc->echan->slot[0], dst);

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can tell the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}

/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
		txstate->residue = edma_residue(echan->edesc);
	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}

static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_config = edma_slave_config;
	dma->device_pause = edma_dma_pause;
	dma->device_resume = edma_dma_resume;
	dma->device_terminate_all = edma_terminate_all;

	dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dma->dev = dev;

	/*
	 * code using dma memcpy must make sure alignment of
	 * length is at dma->copy_align boundary.
	 */
	dma->copy_align = DMAENGINE_ALIGN_4_BYTES;

	INIT_LIST_HEAD(&dma->channels);
}

static int edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return ecc->dummy_slot;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}

static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name = "edma-dma-engine",
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
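
/*
 * Usage sketch (illustrative, not part of this driver): legacy board code
 * without DT can pick a specific EDMA channel through this filter by
 * passing a request number, here a hypothetical channel 12 on controller 0:
 *
 *	dma_cap_mask_t mask;
 *	unsigned req = EDMA_CTLR_CHAN(0, 12);
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &req);
 */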

static int edma_init(void)
{
	return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");