/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "../dmaengine.h"
#include "internal.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more).  See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has been tested with the Atmel AT32AP7000, which does not
 * support descriptor writeback.
 */

static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
{
	return dwc->request_line == (typeof(dwc->request_line))~0;
}

static inline void dwc_set_masters(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	unsigned char mmax = dw->nr_masters - 1;

	if (!is_request_line_unset(dwc))
		return;

	dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws));
	dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws));
}

#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
		bool _is_slave = is_slave_direction(_dwc->direction);	\
		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dwc->dst_master)		\
		 | DWC_CTLL_SMS(_dwc->src_master));		\
	})

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	} else {
		if (dwc->direction == DMA_MEM_TO_DEV)
			cfghi = DWC_CFGH_DST_PER(dwc->request_line);
		else if (dwc->direction == DMA_DEV_TO_MEM)
			cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/

static inline unsigned int dwc_fast_fls(unsigned long long v)
{
	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!(v & 7))
		return 3;
	else if (!(v & 3))
		return 2;
	else if (!(v & 1))
		return 1;
	return 0;
}

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, desc->lli.sar);
	channel_writel(dwc, DAR, desc->lli.dar);
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	unsigned long	was_soft_llp;

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		dwc->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback		callback = NULL;
	void				*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	dma_descriptor_unmap(txd);
	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

/* Returns how many bytes were already received from source */
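/*
 * Illustrative example (placeholder values): with CTLx.BLOCK_TS = 64 and
 * SRC_TR_WIDTH = 2 (32-bit source transfers), the calculation below
 * reports 64 * (1 << 2) = 256 bytes already transferred.
 */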
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update desc to reflect last sent one */
				if (active != head->next)
					desc = to_dw_desc(active->prev);

				dwc->residue -= desc->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		dwc->residue = 0;

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		dwc->residue = 0;
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		dwc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			dwc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		dwc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (child->lli.llp == llp) {
				/* Currently in progress */
				dwc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			dwc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       "  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* Called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan),
			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
			status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		/* Make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status = dma_readl(dw, STATUS_INT);

	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan),
			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
			   dw->data_width[dwc->dst_master]);

	src_width = dst_width = min_t(unsigned int, data_width,
				      dwc_fast_fls(src | dest | len));

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
					   dwc->block_size);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;
		desc->len = xfer_count << src_width;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		data_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		data_width = dw->data_width[dwc->src_master];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_fls(mem | len));

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > dwc->block_size) {
				dlen = dwc->block_size << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		data_width = dw->data_width[dwc->dst_master];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_fls(mem | len));

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
						"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > dwc->block_size) {
				dlen = dwc->block_size << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done by finding least significant bit set: n & (n - 1)
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
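
/*
 * Worked example of the mapping above (illustrative only): maxburst = 16
 * gives fls(16) - 2 = 5 - 2 = 3, maxburst = 4 gives 3 - 2 = 1, and
 * maxburst = 1 (or 0) falls through to 0.
 */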

static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(sconfig->direction))
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
	dwc->direction = sconfig->direction;

	/* Take the request line from slave_id member */
	if (is_request_line_unset(dwc))
		dwc->request_line = sconfig->slave_id;

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}

static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);
	unsigned int count = 20;	/* timeout iterations */

	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	dwc->paused = true;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);

	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);

	dwc->paused = false;
}

static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		dwc_chan_pause(dwc);

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_chan_resume(dwc);

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

		dwc_chan_disable(dw, dwc);

		dwc_chan_resume(dwc);

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}

static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
{
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	residue = dwc->residue;
	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
		residue -= dwc_get_sent(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
	return residue;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	enum dma_status		ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_COMPLETE)
		dma_set_residue(txstate, dwc_get_residue(dwc));

	if (dwc->paused && ret == DMA_IN_PROGRESS)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	int			i;
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	dwc_set_masters(dwc);

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		dma_addr_t phys;

		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
		if (!desc)
			goto err_desc_alloc;

		memset(desc, 0, sizeof(struct dw_desc));

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = phys;

		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;

err_desc_alloc:
	dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;
	dwc->request_line = ~0;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	}

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

/* --------------------- Cyclic DMA API extensions -------------------- */
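
/*
 * Illustrative usage sketch (not part of this driver): a client that has
 * already obtained a channel on this controller and programmed the slave
 * parameters (addresses, widths, bursts), e.g. via dmaengine_slave_config(),
 * would typically drive these extensions roughly as follows; the callback
 * and context names are placeholders.
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_dma, buf_len, period_len,
 *				   DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *
 *	cdesc->period_callback = my_period_done;
 *	cdesc->period_callback_param = my_context;
 *
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */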

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* Assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* Setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->nollp) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"channel doesn't support LLP transfers\n");
		return ERR_PTR(-EINVAL);
	}

	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (unlikely(!is_slave_direction(direction)))
		goto out_err;

	dwc->direction = direction;

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (dwc->block_size << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last)
			last->lli.llp = desc->txd.phys;

		last = desc;
	}

	/* Let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;

	dev_dbg(chan2dev(&dwc->chan),
			"cyclic prepared buf %pad len %zu period %zu periods %d\n",
			&buf_addr, buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;
	unsigned long		flags;

	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}

int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
{
	struct dw_dma		*dw;
	bool			autocfg;
	unsigned int		dw_params;
	unsigned int		nr_channels;
	unsigned int		max_blk_size = 0;
	int			err;
	int			i;

	dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->regs = chip->regs;
	chip->dw = dw;

	dw->clk = devm_clk_get(chip->dev, "hclk");
	if (IS_ERR(dw->clk))
		return PTR_ERR(dw->clk);
	err = clk_prepare_enable(dw->clk);
	if (err)
		return err;

	dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
	autocfg = dw_params >> DW_PARAMS_EN & 0x1;

	dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);

	if (!pdata && autocfg) {
		pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
		if (!pdata) {
			err = -ENOMEM;
			goto err_pdata;
		}

		/* Fill platform data with the default values */
		pdata->is_private = true;
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
		err = -EINVAL;
		goto err_pdata;
	}

	if (autocfg)
		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
	else
		nr_channels = pdata->nr_channels;

	dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan),
				GFP_KERNEL);
	if (!dw->chan) {
		err = -ENOMEM;
		goto err_pdata;
	}

	/* Get hardware configuration parameters */
	if (autocfg) {
		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);

		dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < dw->nr_masters; i++) {
			dw->data_width[i] =
				(dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
		}
	} else {
		dw->nr_masters = pdata->nr_masters;
		memcpy(dw->data_width, pdata->data_width, 4);
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << nr_channels) - 1;

	/* Force dma off, just in case */
	dw_dma_off(dw);

	/* Disable BLOCK interrupts as well */
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pdata;
	}

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
			  "dw_dmac", dw);
	if (err)
		goto err_pdata;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];
		int			r = nr_channels - i - 1;

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = r;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;
		dwc->request_line = ~0;

		/* Hardware configuration */
		if (autocfg) {
			unsigned int dwc_params;
			void __iomem *addr = chip->regs + r * sizeof(u32);

			dwc_params = dma_read_byaddr(addr, DWC_PARAMS);

			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
					   dwc_params);

			/*
			 * Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095.
			 */
			dwc->block_size =
				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
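			/* e.g. an encoded value of 0x0a yields (4 << 10) - 1 = 4095 */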
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;

			/* Check if channel supports multi block transfer */
			channel_writel(dwc, LLP, 0xfffffffc);
			dwc->nollp =
				(channel_readl(dwc, LLP) & 0xfffffffc) == 0;
			channel_writel(dwc, LLP, 0);
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = chip->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	err = dma_async_device_register(&dw->dma);
	if (err)
		goto err_dma_register;

	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
		 nr_channels);

	return 0;

err_dma_register:
	free_irq(chip->irq, dw);
err_pdata:
	clk_disable_unprepare(dw->clk);
	return err;
}
EXPORT_SYMBOL_GPL(dw_dma_probe);

int dw_dma_remove(struct dw_dma_chip *chip)
{
	struct dw_dma		*dw = chip->dw;
	struct dw_dma_chan	*dwc, *_dwc;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(chip->irq, dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable_unprepare(dw->clk);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_remove);

void dw_dma_shutdown(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw_dma_off(dw);
	clk_disable_unprepare(dw->clk);
}
EXPORT_SYMBOL_GPL(dw_dma_shutdown);

#ifdef CONFIG_PM_SLEEP

int dw_dma_suspend(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw_dma_off(dw);
	clk_disable_unprepare(dw->clk);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_suspend);

int dw_dma_resume(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	clk_prepare_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_resume);

#endif /* CONFIG_PM_SLEEP */

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");