/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */

/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

struct edma_pset {
	u32				len;
	dma_addr_t			addr;
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	enum dma_transfer_direction	direction;
	int				cyclic;
	int				absync;
	int				pset_nr;
	struct edma_chan		*echan;
	int				processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 * so far to cover accounting. This is synced up with processed
	 * during edma_callback and is always <= processed, because processed
	 * refers to the number of pending transfers (programmed to the EDMA
	 * controller), whereas processed_stat tracks the number of transfers
	 * accounted for so far.
	 *
	 * - residue: the number of bytes left to transfer for this descriptor
	 *
	 * - residue_stat: the residue in bytes of data we have covered
	 * so far for accounting. This is synced up with residue during
	 * callbacks to keep it current.
	 *
	 * - sg_len: tracks the length of the current intermediate transfer;
	 * this is required to update the residue during the intermediate
	 * transfer completion callback.
	 */
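	/*
	 * Worked example (illustrative, with MAX_NR_SG = 20): for a
	 * 30-element SG list, the first edma_execute() programs 20 elements,
	 * so processed = 20 while processed_stat is still 0. When the
	 * intermediate completion interrupt fires, edma_callback() subtracts
	 * sg_len (the bytes covered by those 20 elements) from residue,
	 * copies that into residue_stat, sets processed_stat = 20, and then
	 * programs the remaining 10 elements.
	 */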
	int				processed_stat;
	u32				sg_len;
	u32				residue;
	u32				residue_stat;

	struct edma_pset		pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};

struct edma_cc {
	int				ctlr;
	struct dma_device		dma_slave;
	struct edma_chan		slave_chans[EDMA_CHANS];
	int				num_slave_chans;
	int				dummy_slot;
};

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	if (!echan->edesc) {
		/* Setup is needed for the first transfer */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many PaRAM sets are left to process */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(echan->vchan.chan.device->dev,
			"\n pset[%d]:\n"
			"  chnum\t%d\n"
			"  slot\t%d\n"
			"  opt\t%08x\n"
			"  src\t%08x\n"
			"  dst\t%08x\n"
			"  abcnt\t%08x\n"
			"  ccnt\t%08x\n"
			"  bidx\t%08x\n"
			"  cidx\t%08x\n"
			"  lkrld\t%08x\n",
			j, echan->ch_num, echan->slot[i],
			edesc->pset[j].param.opt,
			edesc->pset[j].param.src,
			edesc->pset[j].param.dst,
			edesc->pset[j].param.a_b_cnt,
			edesc->pset[j].param.ccnt,
			edesc->pset[j].param.src_dst_bidx,
			edesc->pset[j].param.src_dst_cidx,
			edesc->pset[j].param.link_bcntrld);
		/* Link to the next slot if this is not the last set */
		if (i != (nslots - 1))
			edma_link(echan->slot[i], echan->slot[i+1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a set of SG-list transactions then set
	 * up a link to the dummy slot; this results in all future events
	 * being absorbed and that's OK because we're done. For a cyclic
	 * transfer, link back into the ring instead so the transfer keeps
	 * repeating.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(echan->slot[nslots-1], echan->slot[1]);
		else
			edma_link(echan->slot[nslots-1],
				  echan->ecc->dummy_slot);
	}

	if (echan->missed) {
		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up into
		 * transfers of at most MAX_NR_SG elements.
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(echan->ch_num);
		edma_stop(echan->ch_num);
		edma_start(echan->ch_num);
		edma_trigger_channel(echan->ch_num);
		echan->missed = 0;
	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(echan->ch_num);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan->ch_num);
	}
}

static int edma_terminate_all(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_stop() returns (even if it is, it will see that
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		edma_stop(echan->ch_num);
		/* Move the cyclic channel back to default queue */
		if (echan->edesc->cyclic)
			edma_assign_channel_eventq(echan->ch_num,
						   EVENTQ_DEFAULT);
		/*
		 * free the running request descriptor
		 * since it is not in any of the vdesc lists
		 */
		edma_desc_free(&echan->edesc->vdesc);
		echan->edesc = NULL;
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}

static int edma_slave_config(struct dma_chan *chan,
	struct dma_slave_config *cfg)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
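
/*
 * Sketch of how a client peripheral driver would typically hand this
 * configuration to the channel (illustrative only; fifo_phys_addr and the
 * chosen widths are hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 1,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */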

static int edma_dma_pause(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (!echan->edesc)
		return -EINVAL;

	edma_pause(echan->ch_num);
	return 0;
}

static int edma_dma_resume(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	edma_resume(echan->ch_num);
	return 0;
}

/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and set up
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: Maximum burst size, in units of dev_width
 * @dev_width: Bus width of the device port, in bytes
 * @dma_length: Total length of the DMA transfer, in bytes
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
	dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
	enum dma_slave_buswidth dev_width, unsigned int dma_length,
	enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int acnt, bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	acnt = dev_width;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: bcntrld is used in A-sync transfers only, and it
		 * only matters when sg_dma_len(sg) >= SZ_64K.
		 * In that case the approach taken is: bcnt for the first
		 * frame will be the remainder computed below, and for
		 * every successive frame bcnt will be SZ_64K - 1. This
		 * is ensured by setting bcntrld to 0xffff at the end of
		 * this function.
		 */
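		/*
		 * Worked example (illustrative numbers): with acnt = 4 and
		 * dma_length = 262144 bytes, dma_length / acnt = 65536
		 * elements, so ccnt = 65536 / 65535 = 1 and the remainder
		 * bcnt = 1. Since bcnt is non-zero, ccnt becomes 2: the
		 * first frame moves 1 element and the second frame, after
		 * bcnt is reloaded from bcntrld, moves 65535 elements
		 * (1 + 65535 = 65536 elements in total).
		 */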
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
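		/*
		 * Worked example (illustrative numbers): with acnt = 4
		 * (a 4-byte wide FIFO) and burst = 8, each frame moves
		 * acnt * bcnt = 32 bytes, so a 4096-byte segment needs
		 * ccnt = 4096 / 32 = 128 frames, well within the
		 * SZ_64K - 1 frame limit.
		 */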
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM)  {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM)  {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
	/*
	 * The only time auto reload (bcntrld) is needed is the A-sync
	 * case, and there the required reload value is always SZ_64K - 1.
	 * The link field is initially set to the null link and is
	 * populated later by edma_execute.
	 */
	param->link_bcntrld = 0xffffffff;
	return absync;
}

static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		/* If this is the last in the current SG set of transactions,
		   enable interrupts so that the next set is processed */
		if (!((i+1) % MAX_NR_SG))
			edesc->pset[i].param.opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].param.opt |= TCINTEN;
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);

	if (unlikely(!echan || !len))
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = 1;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM);
	if (ret < 0)
		return NULL;

	edesc->absync = ret;

	/*
	 * Enable intermediate transfer chaining to re-trigger channel
	 * on completion of every TR, and enable transfer-completion
	 * interrupt on completion of the whole transfer.
	 */
	edesc->pset[0].param.opt |= ITCCHEN;
	edesc->pset[0].param.opt |= TCINTEN;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of the period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			"\n pset[%d]:\n"
			"  chnum\t%d\n"
			"  slot\t%d\n"
			"  opt\t%08x\n"
			"  src\t%08x\n"
			"  dst\t%08x\n"
			"  abcnt\t%08x\n"
			"  ccnt\t%08x\n"
			"  bidx\t%08x\n"
			"  cidx\t%08x\n"
			"  lkrld\t%08x\n",
			i, echan->ch_num, echan->slot[i],
			edesc->pset[i].param.opt,
			edesc->pset[i].param.src,
			edesc->pset[i].param.dst,
			edesc->pset[i].param.a_b_cnt,
			edesc->pset[i].param.ccnt,
			edesc->pset[i].param.src_dst_bidx,
			edesc->pset[i].param.src_dst_cidx,
			edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable period interrupt only if it is requested
		 */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[i].param.opt |= TCINTEN;
	}

	/* Place the cyclic channel in the highest priority queue */
	edma_assign_channel_eventq(echan->ch_num, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	struct edmacc_param p;

	edesc = echan->edesc;

	spin_lock(&echan->vchan.lock);
	switch (ch_status) {
	case EDMA_DMA_COMPLETE:
		if (edesc) {
			if (edesc->cyclic) {
				vchan_cyclic_callback(&edesc->vdesc);
				goto out;
			} else if (edesc->processed == edesc->pset_nr) {
				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
				edesc->residue = 0;
				edma_stop(echan->ch_num);
				vchan_cookie_complete(&edesc->vdesc);
				echan->edesc = NULL;
			} else {
				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);

				edma_pause(echan->ch_num);

				/* Update statistics for tx_status */
				edesc->residue -= edesc->sg_len;
				edesc->residue_stat = edesc->residue;
				edesc->processed_stat = edesc->processed;
			}
			edma_execute(echan);
		}
		break;
	case EDMA_DMA_CC_ERROR:
		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);

		/*
		 * Issue later based on the missed flag; one of the following
		 * is sure to happen:
		 * (1) we finish transmitting an intermediate slot and
		 *     edma_execute is coming up, or
		 * (2) we finish the current transfer and issue_pending will
		 *     call edma_execute.
		 *
		 * Important note: issuing can be dangerous here and
		 * lead to some nasty recursion when we are in a NULL
		 * slot. So we avoid doing so and set the missed flag.
		 */
		if (p.a_b_cnt == 0 && p.ccnt == 0) {
			dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
			echan->missed = 1;
		} else {
			/*
			 * The slot is already programmed but the event got
			 * missed, so it's safe to issue it here.
			 */
			dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
			edma_clean_channel(echan->ch_num);
			edma_stop(echan->ch_num);
			edma_start(echan->ch_num);
			edma_trigger_channel(echan->ch_num);
		}
		break;
	default:
		break;
	}
out:
	spin_unlock(&echan->vchan.lock);
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;
	LIST_HEAD(descs);

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
					echan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num,
		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}

static u32 edma_residue(struct edma_desc *edesc)
{
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos;
	int i;

	/*
	 * We always read the dst/src position from the first PaRAM
	 * pset. That's the one which is active now.
	 */
	pos = edma_get_position(edesc->echan->slot[0], dst);

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can tell the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}

/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
		txstate->residue = edma_residue(echan->edesc);
	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}

static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	dma->device_prep_dma_memcpy = edma_prep_dma_memcpy;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_config = edma_slave_config;
	dma->device_pause = edma_dma_pause;
	dma->device_resume = edma_dma_resume;
	dma->device_terminate_all = edma_terminate_all;

	dma->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dma->dev = dev;

	/*
	 * Code using dma memcpy must make sure the length is aligned
	 * to a dma->copy_align boundary.
	 */
	dma->copy_align = DMAENGINE_ALIGN_4_BYTES;

	INIT_LIST_HEAD(&dma->channels);
}

static struct of_dma_filter_info edma_filter_info = {
	.filter_fn = edma_filter_fn,
};

static int edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	struct device_node *parent_node = pdev->dev.parent->of_node;
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return ecc->dummy_slot;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	if (parent_node) {
		dma_cap_set(DMA_SLAVE, edma_filter_info.dma_cap);
		dma_cap_set(DMA_CYCLIC, edma_filter_info.dma_cap);
		of_dma_controller_register(parent_node, of_dma_simple_xlate,
					   &edma_filter_info);
	}

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}

static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct device_node *parent_node = pdev->dev.parent->of_node;

	if (parent_node)
		of_dma_controller_free(parent_node);
	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name = "edma-dma-engine",
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);
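
/*
 * Sketch of typical client usage of the filter function above
 * (illustrative only; the channel number is hypothetical):
 *
 *	dma_cap_mask_t mask;
 *	unsigned int ch_num = EDMA_CTLR_CHAN(0, 12);
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
 */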

static int edma_init(void)
{
	return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");