/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which
 *   also works for MPC8560, MPC8555, MPC8548, MPC8641, etc.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/fsldma.h>
#include "dmaengine.h"
#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */
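
/*
 * Note: the DMA_IN()/DMA_OUT() accessors used below are defined in
 * fsldma.h; they dispatch to big- or little-endian MMIO accessors at
 * run time based on the channel's FSL_DMA_BIG_ENDIAN feature flag.
 */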

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_mr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->mr, val, 32);
}

static u32 get_mr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->mr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_bcr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->bcr, val, 32);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}
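
/*
 * Together, the helpers above fill in a hardware link descriptor
 * (struct fsl_dma_ld_hw, see fsldma.h): a byte count, snoop-qualified
 * source and destination addresses, and the bus address of the next
 * descriptor. The controller walks the resulting chain until it hits
 * a descriptor whose next pointer carries FSL_DMA_EOL, roughly:
 *
 *	LD0              LD1              LD2
 *	count            count            count
 *	src_addr         src_addr         src_addr
 *	dst_addr         dst_addr         dst_addr
 *	next ----------> next ----------> next | FSL_DMA_EOL
 */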

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	set_mr(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE
			| FSL_DMA_MR_EOLNIE);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM);
		break;
	}
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = get_mr(chan);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		set_bcr(chan, 0);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	set_mr(chan, mode);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = get_mr(chan);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		set_mr(chan, mode);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	set_mr(chan, mode);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. When the DMA
 * engine transfers data from the source address (SA) and the loop
 * size is 4, it will read from SA, SA + 1, SA + 2, SA + 3, then loop
 * back to SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = get_mr(chan);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	set_mr(chan, mode);
}
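
/*
 * For example, size = 4 gives __ilog2(4) = 2, so the mode register is
 * written with FSL_DMA_MR_SAHE set and the value 2 in the source
 * address hold transfer size field at bit 14 (2 << 14).
 */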

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. When the DMA
 * engine transfers data to the destination address (TA) and the loop
 * size is 4, it will write to TA, TA + 1, TA + 2, TA + 3, then loop
 * back to TA, TA + 1 ... and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = get_mr(chan);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	set_mr(chan, mode);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size     : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = get_mr(chan);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	set_mr(chan, mode);
}
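
/*
 * For example, size = 1024 gives __ilog2(1024) = 10, which lands in
 * the 4-bit request count field at bits 24-27 of the mode register;
 * the 0x0f000000 mask keeps the rest of MR intact.
 */
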
/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not begin the transfer
 * immediately. The DMA channel will wait for the control pin to be
 * asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

int fsl_dma_external_start(struct dma_chan *dchan, int enable)
{
	struct fsldma_chan *chan;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	fsl_chan_toggle_ext_start(chan, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_dma_external_start);
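
/*
 * A minimal usage sketch (illustrative, not part of the original
 * driver): a platform driver that owns one of these channels could
 * switch it to externally started transfers like so, with "dchan"
 * assumed to come from dma_request_channel():
 *
 *	#include <linux/fsldma.h>
 *
 *	if (fsl_dma_external_start(dchan, 1))
 *		return -EINVAL;
 *
 * Descriptors submitted afterwards wait for the external DMA start
 * pin instead of being started by software.
 */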

static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	dma_cookie_t cookie = -EINVAL;

	spin_lock_bh(&chan->desc_lock);

#ifdef CONFIG_PM
	if (unlikely(chan->pm_state != RUNNING)) {
		chan_dbg(chan, "cannot submit due to suspend\n");
		spin_unlock_bh(&chan->desc_lock);
		return -1;
	}
#endif

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_bh(&chan->desc_lock);

	return cookie;
}

/**
 * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 * @desc: descriptor to be freed
 */
static void fsl_dma_free_descriptor(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc)
{
	list_del(&desc->node);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The descriptor allocated. NULL for failed.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

	chan_dbg(chan, "LD %p allocated\n", desc);

	return desc;
}

/**
 * fsldma_clean_completed_descriptor - free all descriptors which
 * have been completed and acked
 * @chan: Freescale DMA channel
 *
 * This function is used on all completed and acked descriptors.
 * All descriptors should only be freed in this function.
 */
static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node)
		if (async_tx_test_ack(&desc->async_tx))
			fsl_dma_free_descriptor(chan, desc);
}

/**
 * fsldma_run_tx_complete_actions - cleanup a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 * @cookie: Freescale DMA transaction identifier
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies.
 */
static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc, dma_cookie_t cookie)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	dma_cookie_t ret = cookie;

	BUG_ON(txd->cookie < 0);

	if (txd->cookie > 0) {
		ret = txd->cookie;

		/* Run the link descriptor callback function */
		if (txd->callback) {
			chan_dbg(chan, "LD %p callback\n", desc);
			txd->callback(txd->callback_param);
		}
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	return ret;
}

/**
 * fsldma_clean_running_descriptor - move the completed descriptor from
 * ld_running to ld_completed
 * @chan: Freescale DMA channel
 * @desc: the descriptor which is completed
 *
 * Free the descriptor directly if acked by async_tx api, or move it to
 * queue ld_completed.
 */
static void fsldma_clean_running_descriptor(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc)
{
	/* Remove from the list of transactions */
	list_del(&desc->node);

	/*
	 * the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/*
		 * Move this descriptor to the list of descriptors which is
		 * completed, but still awaiting the 'ack' bit to be set.
		 */
		list_add_tail(&desc->node, &chan->ld_completed);
		return;
	}

	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = get_mr(chan);
		mode &= ~FSL_DMA_MR_CS;
		set_mr(chan, mode);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsldma_cleanup_descriptors - cleanup link descriptors which are completed
 * and move them to ld_completed to free until flag 'ack' is set
 * @chan: Freescale DMA channel
 *
 * This function is used on descriptors which have been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, then
 * free these descriptors if flag 'ack' is set.
 */
static void fsldma_cleanup_descriptors(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	dma_addr_t curr_phys = get_cdar(chan);
	int seen_current = 0;

	fsldma_clean_completed_descriptor(chan);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		/*
		 * do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/*
		 * stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (desc->async_tx.phys == curr_phys) {
			seen_current = 1;
			if (!dma_is_idle(chan))
				break;
		}

		cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);

		fsldma_clean_running_descriptor(chan, desc);
	}

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);

	if (cookie > 0)
		chan->common.completed_cookie = cookie;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32 bytes
	 * to meet the FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_bh(&chan->desc_lock);
	fsldma_cleanup_descriptors(chan);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	fsldma_free_desc_list(chan, &chan->ld_completed);
	spin_unlock_bh(&chan->desc_lock);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
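
/*
 * A client-side sketch (illustrative, not part of the original file):
 * a memcpy driven through the generic dmaengine API ends up in
 * fsl_dma_prep_memcpy() above. Error handling is elided and dst/src
 * are assumed to be DMA-mapped bus addresses:
 *
 *	struct dma_chan *dchan = dma_find_channel(DMA_MEMCPY);
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dchan->device->device_prep_dma_memcpy(dchan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(dchan);
 *	while (dma_async_is_tx_complete(dchan, cookie, NULL, NULL)
 *			!= DMA_COMPLETE)
 *		cpu_relax();
 */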

static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
	struct fsldma_chan *chan;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	spin_lock_bh(&chan->desc_lock);

	/* Halt the DMA engine */
	dma_halt(chan);

	/* Remove and free all of the descriptors in the LD queue */
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	fsldma_free_desc_list(chan, &chan->ld_completed);
	chan->idle = true;

	spin_unlock_bh(&chan->desc_lock);
	return 0;
}

static int fsl_dma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct fsldma_chan *chan;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	/* make sure the channel supports setting burst size */
	if (!chan->set_request_count)
		return -ENXIO;

	/* we set the controller burst size depending on direction */
	if (config->direction == DMA_MEM_TO_DEV)
		size = config->dst_addr_width * config->dst_maxburst;
	else
		size = config->src_addr_width * config->src_maxburst;

	chan->set_request_count(chan, size);
	return 0;
}
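
/*
 * For example (illustrative values): a peripheral performing 4-byte
 * reads in bursts of 16 would be configured as
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_DEV_TO_MEM,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst   = 16,
 *	};
 *	dmaengine_slave_config(dchan, &cfg);
 *
 * which makes fsl_dma_device_config() program a 64-byte request count
 * through chan->set_request_count().
 */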

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	spin_lock_bh(&chan->desc_lock);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_bh(&chan->desc_lock);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&chan->desc_lock);
	fsldma_cleanup_descriptors(chan);
	spin_unlock_bh(&chan->desc_lock);

	return dma_cookie_status(dchan, cookie, txstate);
}

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to prepare
	 * for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_bh(&chan->desc_lock);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/* Run all cleanup for descriptors which have been completed */
	fsldma_cleanup_descriptors(chan);

	spin_unlock_bh(&chan->desc_lock);

	chan_dbg(chan, "tasklet exit\n");
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = (res.start & 0xfff) < 0x300 ?
		   ((res.start - 0x100) & 0xfff) >> 7 :
		   ((res.start - 0x200) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	INIT_LIST_HEAD(&chan->ld_completed);
	chan->idle = true;
#ifdef CONFIG_PM
	chan->pm_state = RUNNING;
#endif

	chan->common.device = &fdev->common;
	dma_cookie_init(&chan->common);

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_config = fsl_dma_device_config;
	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
	fdev->common.dev = &op->dev;

	fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
	fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
	fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	platform_set_drvdata(op, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = platform_get_drvdata(op);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	kfree(fdev);

	return 0;
}

#ifdef CONFIG_PM
static int fsldma_suspend_late(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct fsldma_device *fdev = platform_get_drvdata(pdev);
	struct fsldma_chan *chan;
	int i;

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
		if (unlikely(!chan->idle))
			goto out;
		chan->regs_save.mr = get_mr(chan);
		chan->pm_state = SUSPENDED;
		spin_unlock_bh(&chan->desc_lock);
	}
	return 0;

out:
	for (; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;
		chan->pm_state = RUNNING;
		spin_unlock_bh(&chan->desc_lock);
	}
	return -EBUSY;
}

static int fsldma_resume_early(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct fsldma_device *fdev = platform_get_drvdata(pdev);
	struct fsldma_chan *chan;
	u32 mode;
	int i;

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
		mode = chan->regs_save.mr
			& ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA;
		set_mr(chan, mode);
		chan->pm_state = RUNNING;
		spin_unlock_bh(&chan->desc_lock);
	}

	return 0;
}

static const struct dev_pm_ops fsldma_pm_ops = {
	.suspend_late	= fsldma_suspend_late,
	.resume_early	= fsldma_resume_early,
};
#endif

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,elo3-dma", },
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};
MODULE_DEVICE_TABLE(of, fsldma_of_ids);

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.of_match_table = fsldma_of_ids,
#ifdef CONFIG_PM
		.pm = &fsldma_pm_ops,
#endif
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	pr_info("Freescale Elo series DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo series DMA driver");
MODULE_LICENSE("GPL");