/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and others.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static dma_addr_t get_ndar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->ndar, 64);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
				| FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
			DMA_OUT(chan, &chan->regs->bcr, 0, 32);
			mode |= FSL_DMA_MR_EMP_EN;
		} else {
			mode &= ~FSL_DMA_MR_EMP_EN;
		}
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT)
		mode |= FSL_DMA_MR_EMS_EN;
	else
		mode |= FSL_DMA_MR_CS;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= FSL_DMA_MR_CA;
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. When a loop size is
 * set, the DMA reads from a fixed window at the source address (SA):
 * with a loop size of 4, the DMA reads data from SA, SA + 1, SA + 2,
 * SA + 3, then loops back to SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. When a loop
 * size is set, the DMA writes to a fixed window at the destination
 * address (TA): with a loop size of 4, the DMA writes data to TA,
 * TA + 1, TA + 2, TA + 3, then loops back to TA, TA + 1 ... and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
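
/*
 * Illustrative sketch (not compiled into this driver): a hypothetical
 * caller draining a fixed-address device FIFO might combine the two
 * loop-size helpers above. The function name is an assumption for the
 * example only.
 */
#if 0
static void example_setup_fifo_read(struct fsldma_chan *chan)
{
	/* re-read a 4-byte window at the source (FIFO) address */
	fsl_chan_set_src_loop_size(chan, 4);

	/* let the destination address advance normally */
	fsl_chan_set_dst_loop_size(chan, 0);
}
#endif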

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not begin the transfer
 * immediately. The DMA channel will wait until the control pin is
 * asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
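
/*
 * Illustrative sketch (not compiled into this driver): pacing a channel
 * from the external DREQ# pin combines the three helpers above. The
 * function name and the 64-byte request count are assumptions for the
 * example only.
 */
#if 0
static void example_enable_dreq_pacing(struct fsldma_chan *chan)
{
	/* pause after every 64 bytes until DREQ# is asserted again */
	fsl_chan_set_request_count(chan, 64);
	fsl_chan_toggle_ext_pause(chan, 1);

	/* optionally also wait for the external start pin */
	fsl_chan_toggle_ext_start(chan, 1);
}
#endif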

static void append_ld_queue(struct fsldma_chan *chan,
			    struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	cookie = chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		child->async_tx.cookie = cookie;
	}

	chan->common.cookie = cookie;

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The descriptor allocated. NULL for failed.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p allocated\n", desc);
#endif

	return desc;
}


/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * The descriptor must be aligned to 32 bytes to meet
	 * the FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		chan_err(chan, "%s\n", msg_ld_oom);
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
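
/*
 * Illustrative sketch (not compiled into this driver): a hypothetical
 * dmaengine client could drive the memcpy operation above roughly as
 * follows, assuming a channel from dma_request_channel() and DMA-mapped
 * source/destination addresses.
 */
#if 0
static dma_cookie_t example_issue_memcpy(struct dma_chan *chan,
					 dma_addr_t dst, dma_addr_t src,
					 size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	/* ends up in fsl_dma_tx_submit() for this driver */
	cookie = tx->tx_submit(tx);
	if (!dma_submit_error(cookie))
		dma_async_issue_pending(chan);

	return cookie;
}
#endif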

static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
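
/*
 * Illustrative sketch (not compiled into this driver): a hypothetical
 * client with two already-mapped scatterlists could use the sg-to-sg
 * operation above like this; mapping and unmapping are omitted.
 */
#if 0
static int example_issue_sg_copy(struct dma_chan *chan,
				 struct scatterlist *dst_sg, int dst_nents,
				 struct scatterlist *src_sg, int src_nents)
{
	struct dma_async_tx_descriptor *tx;

	tx = chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
					      src_sg, src_nents,
					      DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	if (dma_submit_error(tx->tx_submit(tx)))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
#endif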

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	/*
	 * This operation is not supported on the Freescale DMA controller
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}

static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		/* Halt the DMA engine */
		dma_halt(chan);

		spin_lock_irqsave(&chan->desc_lock, flags);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);

		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_TO_DEVICE)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:

		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}
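
/*
 * Illustrative sketch (not compiled into this driver): a hypothetical
 * slave client could program the request count through the generic
 * DMA_SLAVE_CONFIG command handled above. The bus width and burst
 * length are example values only.
 */
#if 0
static int example_configure_slave(struct dma_chan *chan)
{
	struct dma_slave_config config = {
		.direction = DMA_TO_DEVICE,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 16,	/* 4 bytes * 16 = 64-byte request */
	};

	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&config);
}
#endif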

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @chan : Freescale DMA channel
 *
 * CONTEXT: hardirq
 */
static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	if (list_empty(&chan->ld_running)) {
		chan_dbg(chan, "no running descriptors\n");
		goto out_unlock;
	}

	/* Get the last descriptor, update the cookie to that */
	desc = to_fsl_desc(chan->ld_running.prev);
	if (dma_is_idle(chan))
		cookie = desc->async_tx.cookie;
	else {
		cookie = desc->async_tx.cookie - 1;
		if (unlikely(cookie < DMA_MIN_COOKIE))
			cookie = DMA_MAX_COOKIE;
	}

	chan->completed_cookie = cookie;

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsldma_desc_status - Check the status of a descriptor
 * @chan: Freescale DMA channel
 * @desc: DMA SW descriptor
 *
 * This function will return the status of the given descriptor
 */
static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
					  struct fsl_desc_sw *desc)
{
	return dma_async_is_complete(desc->async_tx.cookie,
				     chan->completed_cookie,
				     chan->common.cookie);
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel.
 */
static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	chan_dbg(chan, "chan completed_cookie = %d\n", chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
			break;

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->desc_lock, flags);
#ifdef FSL_DMA_LD_DEBUG
			chan_dbg(chan, "LD %p callback\n", desc);
#endif
			callback(callback_param);
			spin_lock_irqsave(&chan->desc_lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}

	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * This will make sure that any pending transactions will be run.
 * If the DMA controller is idle, it will be started. Otherwise,
 * the DMA controller's interrupt handler will start any pending
 * transactions when it becomes idle.
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		goto out_unlock;
	}

	/*
	 * The DMA controller is not idle, which means the interrupt
	 * handler will start any queued transactions when it runs
	 * at the end of the current transaction
	 */
	if (!dma_is_idle(chan)) {
		chan_dbg(chan, "DMA controller still busy\n");
		goto out_unlock;
	}

	/*
	 * TODO:
	 * make sure the dma_halt() function really un-wedges the
	 * controller as much as possible
	 */
	dma_halt(chan);

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	dma_start(chan);

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	fsl_chan_xfer_ld_queue(chan);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(chan);

	last_used = dchan->cookie;
	last_complete = chan->completed_cookie;

	dma_set_tx_state(txstate, last_complete, last_used, 0);

	return dma_async_is_complete(cookie, last_complete, last_used);
}
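
/*
 * Illustrative sketch (not compiled into this driver): a hypothetical
 * client could busy-poll a cookie through the status helper above via
 * the generic dma_async_is_tx_complete() wrapper.
 */
#if 0
static int example_wait_for_cookie(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	dma_cookie_t last, used;

	do {
		status = dma_async_is_tx_complete(chan, cookie, &last, &used);
	} while (status == DMA_IN_PROGRESS);

	return status == DMA_SUCCESS ? 0 : -EIO;
}
#endif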

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	int update_cookie = 0;
	int xfer_ld_q = 0;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		if (get_bcr(chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
			 * Now, update the completed cookie, and continue the
			 * next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/*
	 * If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		chan_dbg(chan, "irq: End-of-segments INT\n");
		chan_dbg(chan, "irq: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(chan),
			(unsigned long long)get_ndar(chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/*
	 * For the MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/*
	 * If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to
	 * prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(chan);
	if (stat)
		chan_dbg(chan, "irq: unhandled sr 0x%08x\n", stat);

	chan_dbg(chan, "irq: Exit\n");
	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	fsl_chan_ld_cleanup(chan);
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int __devinit fsldma_of_probe(struct platform_device *op,
			const struct of_device_id *match)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "Not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	dev_set_drvdata(&op->dev, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);

out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(fdev);

	return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&fsldma_of_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit fsldma_exit(void)
{
	of_unregister_platform_driver(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");