/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also compatible with the MPC8560, MPC8555, MPC8548, MPC8641, and others.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

#include "dmaengine.h"
#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	return DMA_TO_CPU(chan, desc->hw.count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static dma_addr_t get_desc_src(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = DMA_IN(chan, &chan->regs->mr, 32);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. When address hold
 * is enabled, the DMA reads data from a fixed window of source
 * addresses: with a loop size of 4, the controller reads from SA,
 * SA + 1, SA + 2, SA + 3, then wraps back to SA, SA + 1, and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
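
/*
 * Worked example (illustrative, derived from the switch above): a loop
 * size of 4 gives __ilog2(4) = 2, so the mode register is programmed
 * with FSL_DMA_MR_SAHE | (2 << 14). The destination hold size below
 * uses the same scheme, shifted by 16 instead of 14.
 */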

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. When address
 * hold is enabled, the DMA writes data to a fixed window of destination
 * addresses: with a loop size of 4, the controller writes to TA,
 * TA + 1, TA + 2, TA + 3, then wraps back to TA, TA + 1, and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size     : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to be transferred
 * before pausing the channel, after which a new assertion of DREQ# resumes
 * channel operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
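
/*
 * Worked example (illustrative): a request count of 1024 bytes gives
 * __ilog2(1024) = 10, so bits 24-27 of the mode register are set to
 * 0xa (mode |= 0x0a000000). Sizes that are not a power of two are
 * rounded down by __ilog2().
 */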

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * When external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not begin the transfer
 * immediately. The DMA channel will wait until the control pin is
 * asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p allocated\n", desc);
#endif

	return desc;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * The descriptors must be aligned to 32 bytes to meet the FSL DMA
	 * specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		chan_err(chan, "%s\n", msg_ld_oom);
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
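
/*
 * Illustrative usage sketch (not part of the driver): a dmaengine client
 * would drive this prep routine through the generic API roughly as
 * follows. The channel lookup and the dst/src/len values are hypothetical,
 * and error handling is omitted.
 *
 *	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *	struct dma_async_tx_descriptor *tx;
 *	enum dma_status status;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_CTRL_ACK);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *	...
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 */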

static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: DMAEngine flags
 * @context: transaction context (ignored)
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	/*
	 * This operation is not supported on the Freescale DMA controller
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}

static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&chan->desc_lock, flags);

		/* Halt the DMA engine */
		dma_halt(chan);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);
		chan->idle = true;

		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_MEM_TO_DEV)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:

		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}
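
/*
 * Illustrative sketch (hypothetical values): a slave client would
 * configure the external-control burst size by filling in a
 * dma_slave_config and handing it to this routine through
 * dmaengine_slave_config():
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *
 * With these values, fsl_chan_set_request_count() is called with
 * 4 * 8 = 32 bytes.
 */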

/**
 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, and then
 * free the descriptor.
 */
static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
				      struct fsl_desc_sw *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	struct device *dev = chan->common.device->dev;
	dma_addr_t src = get_desc_src(chan, desc);
	dma_addr_t dst = get_desc_dst(chan, desc);
	u32 len = get_desc_cnt(chan, desc);

	/* Run the link descriptor callback function */
	if (txd->callback) {
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p callback\n", desc);
#endif
		txd->callback(txd->callback_param);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	dma_descriptor_unmap(txd);
#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p free\n", desc);
#endif
	dma_pool_free(chan->desc_pool, desc, txd->phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = DMA_IN(chan, &chan->regs->mr, 32);
		mode &= ~FSL_DMA_MR_CS;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * On the MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer is the end-of-transfer, the Channel
	 * Start bit must be cleared to prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	struct fsl_desc_sw *desc, *_desc;
	LIST_HEAD(ld_cleanup);
	unsigned long flags;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->ld_running)) {
		dma_cookie_t cookie;

		desc = to_fsl_desc(chan->ld_running.prev);
		cookie = desc->async_tx.cookie;
		dma_cookie_complete(&desc->async_tx);

		chan_dbg(chan, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->ld_running, &ld_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {

		/* Remove from the list of transactions */
		list_del(&desc->node);

		/* Run all cleanup for this descriptor */
		fsldma_cleanup_descriptor(chan, desc);
	}

	chan_dbg(chan, "tasklet exit\n");
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}
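
/*
 * Note (illustrative): the general status register packs one byte of
 * status per channel. The mask walk above tests channel 0 against bits
 * 31-24 (0xff000000), channel 1 against bits 23-16, and so on down the
 * register.
 */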

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = (res.start & 0xfff) < 0x300 ?
		   ((res.start - 0x100) & 0xfff) >> 7 :
		   ((res.start - 0x200) & 0xfff) >> 7;
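
	/*
	 * Worked example (illustrative): with channel register blocks 0x80
	 * bytes apart, 'reg' offsets 0x100/0x180/0x200/0x280 map to channel
	 * ids 0-3 through the first branch, while offsets at or above 0x300
	 * take the second branch, e.g. 0x400 -> 4 and 0x480 -> 5 on the
	 * eight-channel elo3 controllers.
	 */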
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	chan->idle = true;

	chan->common.device = &fdev->common;
	dma_cookie_init(&chan->common);

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "Not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	platform_set_drvdata(op, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object; an example of the device tree layout this walk
	 * expects follows this function.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}
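
/*
 * Illustrative device tree fragment (hypothetical addresses) for the
 * eloplus case matched above; see the fsl,elo-dma binding documentation
 * for the authoritative layout:
 *
 *	dma@21300 {
 *		compatible = "fsl,eloplus-dma";
 *		reg = <0x21300 0x4>;
 *
 *		dma-channel@0 {
 *			compatible = "fsl,eloplus-dma-channel";
 *			reg = <0x0 0x80>;
 *			interrupts = <20 2>;
 *		};
 *	};
 */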

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = platform_get_drvdata(op);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	kfree(fdev);

	return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,elo3-dma", },
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	pr_info("Freescale Elo series DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo series DMA driver");
MODULE_LICENSE("GPL");