/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more).  See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has been tested with the Atmel AT32AP7000, which does not
 * support descriptor writeback.
 */

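/*
 * Build the default CTL_LO value for a channel: burst sizes come from the
 * slave config for peripheral transfers (defaulting to 16 data items for
 * memory-to-memory), and the source/destination masters are chosen so that
 * the peripheral side uses p_master and the memory side uses m_master.
 */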
#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
		bool _is_slave = is_slave_direction(_dwc->direction);	\
		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ?		\
			_dwc->dws.p_master : _dwc->dws.m_master;	\
		u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ?		\
			_dwc->dws.p_master : _dwc->dws.m_master;	\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dms)				\
		 | DWC_CTLL_SMS(_sms));				\
	})

/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			  \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	list_add_tail(&desc->desc_node, &dwc->queue);
	spin_unlock_irqrestore(&dwc->lock, flags);
	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
		 __func__, desc->txd.cookie);

	return cookie;
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
	if (!desc)
		return NULL;

	dwc->descs_allocated++;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
	desc->txd.tx_submit = dwc_tx_submit;
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = phys;
	return desc;
}

static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *child, *_next;

	if (unlikely(!desc))
		return;

	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
		list_del(&child->desc_node);
		dma_pool_free(dw->desc_pool, child, child->txd.phys);
		dwc->descs_allocated--;
	}

	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	dwc->descs_allocated--;
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
	bool hs_polarity = dwc->dws.hs_polarity;

	if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
		return;

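	/* Route the hardware handshake interface numbers for this slave */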
	cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
	cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);

	/* Set polarity of handshake interface */
	cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
}

/*----------------------------------------------------------------------*/

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, lli_read(desc, sar));
	channel_writel(dwc, DAR, lli_read(desc, dar));
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u8		lms = DWC_LLP_LMS(dwc->dws.m_master);
	unsigned long	was_soft_llp;

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		first->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

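	/* Hardware LLP mode: hand the whole descriptor chain to the controller */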
	channel_writel(dwc, LLP, first->txd.phys | lms);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback		callback = NULL;
	void				*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);
	dwc_desc_put(dwc, desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

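	/* Call the completion callback without holding the channel lock */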
	if (callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

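	/*
	 * BLOCK_TS in CTL_HI counts source-width items already transferred;
	 * bits 6:4 of CTL_LO hold SRC_TR_WIDTH, so scale that count to bytes.
	 */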
	return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update residue to reflect last sent descriptor */
				if (active == head->next)
					desc->residue -= desc->len;
				else
					desc->residue -= to_dw_desc(active->prev)->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		desc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == DWC_LLP_LOC(llp)) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (lli_read(desc, llp) == llp) {
			/* This one is currently in progress */
			desc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		desc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (lli_read(child, llp) == llp) {
				/* Currently in progress */
				desc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			desc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli_read(desc, sar),
		 lli_read(desc, dar),
		 lli_read(desc, llp),
		 lli_read(desc, ctlhi),
		 lli_read(desc, ctllo));
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
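	/* Move the first queued descriptor onto the active list */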
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       "  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, bad_desc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, child);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* Called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_block, u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (status_block & dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));
		dma_writel(dw, CLEAR.BLOCK, dwc->mask);

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		unsigned int i;

		dev_err(chan2dev(&dwc->chan),
			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
			status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		/* Make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, dwc->cdesc->desc[i]);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_block;
	u32 status_xfer;
	u32 status_err;
	unsigned int i;

	status_block = dma_readl(dw, RAW.BLOCK);
	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_block, status_err,
					status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	/* Check if we have any interrupt from the DMAC which is not in use */
	if (!dw->in_use)
		return IRQ_NONE;

	status = dma_readl(dw, STATUS_INT);
	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
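		/* (1 << 8) - 1 covers the controller's maximum of eight channels */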
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	u8			m_master = dwc->dws.m_master;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width = dw->pdata->data_width[m_master];
	u32			ctllo;
	u8			lms = DWC_LLP_LMS(m_master);

	dev_vdbg(chan2dev(chan),
			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

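	/* Use the widest width the addresses, length and bus width allow */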
	src_width = dst_width = __ffs(data_width | src | dest | len);

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
					   dwc->block_size);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		lli_write(desc, sar, src + offset);
		lli_write(desc, dar, dest + offset);
		lli_write(desc, ctllo, ctllo);
		lli_write(desc, ctlhi, xfer_count);
		desc->len = xfer_count << src_width;

		if (!first) {
			first = desc;
		} else {
			lli_write(prev, llp, desc->txd.phys | lms);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	u8			m_master = dwc->dws.m_master;
	u8			lms = DWC_LLP_LMS(m_master);
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		data_width = dw->pdata->data_width[m_master];
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(data_width | mem | len);

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			lli_write(desc, sar, mem);
			lli_write(desc, dar, reg);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
			if ((len >> mem_width) > dwc->block_size) {
				dlen = dwc->block_size << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			lli_write(desc, ctlhi, dlen >> mem_width);
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(data_width | mem | len);

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			lli_write(desc, sar, reg);
			lli_write(desc, dar, mem);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
			if ((len >> reg_width) > dwc->block_size) {
				dlen = dwc->block_size << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			lli_write(desc, ctlhi, dlen >> reg_width);
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan),
		"not enough descriptors available. Direction %d\n", direction);
	dwc_desc_put(dwc, first);
	return NULL;
}

bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (dws->dma_dev != chan->device->dev)
		return false;

	/* We have to copy data since dws can be temporary storage */
	memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);

/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by the controller.
 *
 * The conversion takes the position of the most significant set bit:
 * fls(n) - 2 maps 4, 8 and 16 onto 1, 2 and 3 (and 1 onto 0).
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}

static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan will be configured for slave transfers */
	if (!is_slave_direction(sconfig->direction))
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
	dwc->direction = sconfig->direction;

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}

static int dwc_pause(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	unsigned long		flags;
	unsigned int		count = 20;	/* timeout iterations */
	u32			cfglo;

	spin_lock_irqsave(&dwc->lock, flags);

	cfglo = channel_readl(dwc, CFG_LO);
	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
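	/* Wait for the channel FIFO to drain, but give up after ~40 us */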
	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);

	channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);

	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_resume(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
		dwc_chan_resume(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}

static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
{
	struct dw_desc *desc;

	list_for_each_entry(desc, &dwc->active_list, desc_node)
		if (desc->txd.cookie == c)
			return desc;

	return NULL;
}

static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
{
	struct dw_desc *desc;
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	desc = dwc_find_desc(dwc, cookie);
	if (desc) {
		if (desc == dwc_first_active(dwc)) {
			residue = desc->residue;
			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
				residue -= dwc_get_sent(dwc);
		} else {
			residue = desc->total_len;
		}
	} else {
		residue = 0;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);
	return residue;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	enum dma_status		ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dma_set_residue(txstate, dwc_get_residue(dwc, cookie));

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (list_empty(&dwc->active_list))
		dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	unsigned int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
}

static void dw_dma_on(struct dw_dma *dw)
{
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	/*
	 * We need controller-specific data to set up slave transfers.
	 */
	if (chan->private && !dw_dma_filter(chan, chan->private)) {
		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
		return -EINVAL;
	}

	/* Enable controller here if needed */
	if (!dw->in_use)
		dw_dma_on(dw);
	dw->in_use |= dwc->mask;

	return 0;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	unsigned long		flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);

	/* Clear custom channel configuration */
	memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));

	clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Disable controller in case it was the last user */
	dw->in_use &= ~dwc->mask;
	if (!dw->in_use)
		dw_dma_off(dw);

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	unsigned long		flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* Enable interrupts to perform cyclic transfer */
	channel_set_bit(dw, MASK.BLOCK, dwc->mask);

	dwc_dostart(dwc, dwc->cdesc->desc[0]);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	u8				lms = DWC_LLP_LMS(dwc->dws.m_master);
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->nollp) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"channel doesn't support LLP transfers\n");
		return ERR_PTR(-EINVAL);
	}

	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (unlikely(!is_slave_direction(direction)))
		goto out_err;

	dwc->direction = direction;

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (dwc->block_size << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			lli_write(desc, dar, sconfig->dst_addr);
			lli_write(desc, sar, buf_addr + period_len * i);
			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC
				| DWC_CTLL_INT_EN));

			lli_set(desc, ctllo, sconfig->device_fc ?
					DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
					DWC_CTLL_FC(DW_DMA_FC_D_M2P));

			break;
		case DMA_DEV_TO_MEM:
			lli_write(desc, dar, buf_addr + period_len * i);
			lli_write(desc, sar, sconfig->src_addr);
			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX
				| DWC_CTLL_INT_EN));

			lli_set(desc, ctllo, sconfig->device_fc ?
					DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
					DWC_CTLL_FC(DW_DMA_FC_D_P2M));

			break;
		default:
			break;
		}

		lli_write(desc, ctlhi, period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last)
			lli_write(last, llp, desc->txd.phys | lms);

		last = desc;
	}

	/* Let's make a cyclic list */
	lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);

	dev_dbg(chan2dev(&dwc->chan),
			"cyclic prepared buf %pad len %zu period %zu periods %d\n",
			&buf_addr, buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	unsigned int		i;
	unsigned long		flags;

	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	dwc->cdesc = NULL;

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);

/*----------------------------------------------------------------------*/

int dw_dma_probe(struct dw_dma_chip *chip)
{
	struct dw_dma_platform_data *pdata;
	struct dw_dma		*dw;
	bool			autocfg = false;
	unsigned int		dw_params;
	unsigned int		i;
	int			err;

	dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
	if (!dw->pdata)
		return -ENOMEM;

	dw->regs = chip->regs;
	chip->dw = dw;

	pm_runtime_get_sync(chip->dev);

	if (!chip->pdata) {
		dw_params = dma_readl(dw, DW_PARAMS);
		dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);

		autocfg = dw_params >> DW_PARAMS_EN & 1;
		if (!autocfg) {
			err = -EINVAL;
			goto err_pdata;
		}

		/* Reassign the platform data pointer */
		pdata = dw->pdata;

		/* Get hardware configuration parameters */
		pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
		pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < pdata->nr_masters; i++) {
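			/* 2-bit field encodes a 32..256 bit master; store bytes */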
			pdata->data_width[i] =
				4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
		}
		pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);

		/* Fill platform data with the default values */
		pdata->is_private = true;
		pdata->is_memcpy = true;
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
		err = -EINVAL;
		goto err_pdata;
	} else {
		memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));

		/* Reassign the platform data pointer */
		pdata = dw->pdata;
	}

	dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
				GFP_KERNEL);
	if (!dw->chan) {
		err = -ENOMEM;
		goto err_pdata;
	}

	/* Calculate the all-channel mask before DMA setup */
	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	/* Force dma off, just in case */
	dw_dma_off(dw);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pdata;
	}

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
			  "dw_dmac", dw);
	if (err)
		goto err_pdata;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;

		/* Hardware configuration */
		if (autocfg) {
			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
			void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
			unsigned int dwc_params = dma_readl_native(addr);

			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
					   dwc_params);

			/*
			 * Decode the maximum block size for the given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095.
			 */
			dwc->block_size =
				(4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;
			dwc->nollp = pdata->is_nollp;
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	/* Set capabilities */
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	if (pdata->is_memcpy)
		dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

	dw->dma.dev = chip->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;

	dw->dma.device_config = dwc_config;
	dw->dma.device_pause = dwc_pause;
	dw->dma.device_resume = dwc_resume;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	/* DMA capabilities */
	dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			     BIT(DMA_MEM_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	err = dma_async_device_register(&dw->dma);
	if (err)
		goto err_dma_register;

	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
		 pdata->nr_channels);

	pm_runtime_put_sync_suspend(chip->dev);

	return 0;

err_dma_register:
	free_irq(chip->irq, dw);
err_pdata:
	pm_runtime_put_sync_suspend(chip->dev);
	return err;
}
EXPORT_SYMBOL_GPL(dw_dma_probe);

int dw_dma_remove(struct dw_dma_chip *chip)
{
	struct dw_dma		*dw = chip->dw;
	struct dw_dma_chan	*dwc, *_dwc;

	pm_runtime_get_sync(chip->dev);

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(chip->irq, dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	pm_runtime_put_sync_suspend(chip->dev);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_remove);

int dw_dma_disable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw_dma_off(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_disable);

int dw_dma_enable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw_dma_on(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_enable);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");