/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems).
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
#include "dmaengine.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more).  See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

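/*
 * Default CTL_LO value for a channel: with slave data attached
 * (chan->private) the AHB master numbers come from it and the burst sizes
 * from the channel's dma_slave_config; otherwise memcpy defaults are used
 * (masters 0/1, burst of 16 items). Linked-list block chaining is always
 * enabled.
 */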
#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_slave *__slave = (_chan->private);	\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
		int _dms = __slave ? __slave->dst_master : 0;	\
		int _sms = __slave ? __slave->src_master : 1;	\
		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dms)				\
		 | DWC_CTLL_SMS(_sms));				\
	})

/*
 * This is configuration-dependent and usually a funny size like 4095.
 *
 * Note that this is a transfer count, i.e. if we transfer 32-bit
 * words, we can do 16380 bytes per descriptor.
 *
 * This parameter is also system-specific.
 */
#define DWC_MAX_COUNT	4095U

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool.  These
 * descriptors -- and associated data -- are cacheable.  We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
		i++;
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc	*child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback		callback = NULL;
	void				*param = NULL;
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%llx\n",
			(unsigned long long)llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check first descriptor's addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* check first descriptor's llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%llx d0x%llx l0x%llx c0x%x:%x\n",
			(unsigned long long)lli->sar,
			(unsigned long long)lli->dar,
			(unsigned long long)lli->llp,
			lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctllo;

	dev_vdbg(chan2dev(chan),
			"prep_dma_memcpy d0x%llx s0x%llx l0x%zx f0x%lx\n",
			(unsigned long long)dest, (unsigned long long)src,
			len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest  | len) & 7))
		src_width = dst_width = 3;
	else if (!((src | dest  | len) & 3))
		src_width = dst_width = 2;
	else if (!((src | dest | len) & 1))
		src_width = dst_width = 1;
	else
		src_width = dst_width = 0;
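
	/*
	 * For example, copying 4096 bytes between 8-byte-aligned buffers
	 * uses 64-bit transfers: 4096 >> 3 = 512 items, which fits in a
	 * single descriptor (DWC_MAX_COUNT is 4095 items).
	 */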

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				DWC_MAX_COUNT);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}


	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave	*dws = chan->private;
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_dma_slave\n");

	if (unlikely(!dws || !sg_len))
		return NULL;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			if (!((mem | len) & 7))
				mem_width = 3;
			else if (!((mem | len) & 3))
				mem_width = 2;
			else if (!((mem | len) & 1))
				mem_width = 1;
			else
				mem_width = 0;

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			if (!((mem | len) & 7))
				mem_width = 3;
			else if (!((mem | len) & 3))
				mem_width = 2;
			else if (!((mem | len) & 1))
				mem_width = 1;
			else
				mem_width = 0;

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
						"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > DWC_MAX_COUNT) {
				dlen = DWC_MAX_COUNT << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by the controller.
 *
 * This can be done by taking the position of the most significant bit set,
 * i.e. for maxburst > 1 the encoded value is fls(maxburst) - 2.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
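
/*
 * For example, a client asking for a maxburst of 8 items is encoded as
 * fls(8) - 2 = 2 (DW_DMA_MSIZE_8), while the unsupported value 2 gets
 * fls(2) - 2 = 0, i.e. it is quietly rounded down to single transfers.
 */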

static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}
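
/*
 * A slave client is expected to describe the peripheral side through the
 * generic dmaengine API before preparing transfers; a minimal sketch (the
 * FIFO address and burst value are of course the client's own):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * dmaengine_slave_config() reaches set_runtime_config() above via
 * dwc_control(DMA_SLAVE_CONFIG), which also converts the burst values
 * with convert_burst().
 */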

static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	u32			cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		channel_clear_bit(dw, CH_EN, dwc->mask);
		while (dma_readl(dw, CH_EN) & dwc->mask)
			cpu_relax();

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	enum dma_status		ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, dwc_first_active(dwc)->len);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	int			i;
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources allocated %d descriptors\n", i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
			dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&dwc->chan),
			"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
			channel_readl(dwc, SAR),
			channel_readl(dwc, DAR),
			channel_readl(dwc, LLP),
			channel_readl(dwc, CTL_HI),
			channel_readl(dwc, CTL_LO));
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
	struct dma_slave_config		*sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc		*cdesc;
	struct dw_cyclic_desc		*retval = NULL;
	struct dw_desc			*desc;
	struct dw_desc			*last = NULL;
	unsigned long			was_cyclic;
	unsigned int			reg_width;
	unsigned int			periods;
	unsigned int			i;
	unsigned long			flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (DWC_MAX_COUNT << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* lets make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
			"period %zu periods %d\n", (unsigned long long)buf_addr,
			buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);
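
/*
 * Typical use of the cyclic API from a client (a rough sketch; the
 * callback and data names are illustrative only, and the start/stop
 * calls must be made with soft interrupts disabled, see above):
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf_phys, buf_len, period_len,
 *			DMA_MEM_TO_DEV);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	cdesc->period_callback = my_period_done;
 *	cdesc->period_callback_param = my_data;
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 *
 * dw_dma_cyclic_free() puts the descriptors back on the free list and
 * clears the cyclic flag, so the channel can be reused afterwards.
 */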

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc	*cdesc = dwc->cdesc;
	int			i;
	unsigned long		flags;

	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}

static int __init dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource		*io;
	struct dw_dma		*dw;
	size_t			size;
	int			irq;
	int			err;
	int			i;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct dw_dma);
	size += pdata->nr_channels * sizeof(struct dw_dma_chan);
	dw = kzalloc(size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	dw->regs = ioremap(io->start, DW_REGLEN);
	if (!dw->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	dw->clk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk)) {
		err = PTR_ERR(dw->clk);
		goto err_clk;
	}
	clk_prepare_enable(dw->clk);

	/* force dma off, just in case */
	dw_dma_off(dw);

	err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	/* Clear/disable all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), pdata->nr_channels);

	dma_async_device_register(&dw->dma);

	return 0;

err_irq:
	clk_disable_unprepare(dw->clk);
	clk_put(dw->clk);
err_clk:
	iounmap(dw->regs);
	dw->regs = NULL;
err_release_r:
	release_resource(io);
err_kfree:
	kfree(dw);
	return err;
}

static int __exit dw_remove(struct platform_device *pdev)
{
	struct dw_dma		*dw = platform_get_drvdata(pdev);
	struct dw_dma_chan	*dwc, *_dwc;
	struct resource		*io;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(platform_get_irq(pdev, 0), dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	clk_disable_unprepare(dw->clk);
	clk_put(dw->clk);

	iounmap(dw->regs);
	dw->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, DW_REGLEN);

	kfree(dw);

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);

	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma	*dw = platform_get_drvdata(pdev);

	clk_prepare_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,
};

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif

static struct platform_driver dw_driver = {
	.remove		= __exit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_id_table),
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");