/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and others.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static dma_addr_t get_ndar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->ndar, 64);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
				| FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
			DMA_OUT(chan, &chan->regs->bcr, 0, 32);
			mode |= FSL_DMA_MR_EMP_EN;
		} else {
			mode &= ~FSL_DMA_MR_EMP_EN;
		}
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT)
		mode |= FSL_DMA_MR_EMS_EN;
	else
		mode |= FSL_DMA_MR_CS;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= FSL_DMA_MR_CA;
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 for disable loop
 *
 * Set the source address hold (loop) transfer size. While the loop is
 * active, the DMA reads from a window starting at the source address (SA).
 * For example, with a loop size of 4 the DMA reads from SA, SA + 1,
 * SA + 2, SA + 3, then loops back to SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 for disable loop
 *
 * Set the destination address hold (loop) transfer size. While the loop is
 * active, the DMA writes to a window starting at the destination address
 * (TA). For example, with a loop size of 4 the DMA writes to TA, TA + 1,
 * TA + 2, TA + 3, then loops back to TA, TA + 1 ... and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
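
/*
 * Worked example (added for illustration; the numbers follow directly from
 * the two helpers above): a source loop of 4 bytes gives __ilog2(4) = 2,
 * so fsl_chan_set_src_loop_size() effectively programs
 *
 *	mode |= FSL_DMA_MR_SAHE | (2 << 14);
 *
 * The destination variant is identical except that it uses FSL_DMA_MR_DAHE
 * and shifts the encoded size left by 16 instead of 14.
 */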

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size     : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
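
/*
 * Worked example (added for illustration): a request count of 64 bytes
 * gives __ilog2(64) = 6, so the value OR'd into the mode register is
 * (6 << 24) & 0x0f000000 = 0x06000000. Sizes that are not a power of two
 * are rounded down by __ilog2().
 */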

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an
 * external DMA start pin. In that case dma_start() does not start the
 * transfer immediately; the DMA channel waits for the control pin to
 * be asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
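
/*
 * Usage sketch (illustrative only; not taken from the original source):
 * to drive a channel from the external DREQ#/start pins, a user would
 * typically program the request count first, then enable external pause
 * and start before submitting descriptors:
 *
 *	fsl_chan_set_request_count(chan, 64);
 *	fsl_chan_toggle_ext_pause(chan, 1);
 *	fsl_chan_toggle_ext_start(chan, 1);
 */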

static void append_ld_queue(struct fsldma_chan *chan,
			    struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}
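
/*
 * Illustration (added for clarity) of what append_ld_queue() does to the
 * hardware chain when a new transaction [D0 -> D1] is appended to a
 * non-empty pending list:
 *
 *	before: ... -> tail (EOL)
 *	after:  ... -> tail -> D0 -> D1 (EOL)
 *
 * set_desc_next() rewrites the old tail's next_ln_addr, clearing its EOL
 * bit; the new transaction's last link keeps the EOL bit that was set by
 * set_ld_eol().
 */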

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	cookie = chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		child->async_tx.cookie = cookie;
	}

	chan->common.cookie = cookie;

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The descriptor allocated. NULL for failed.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}


/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32 bytes
	 * to meet the FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}
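
/*
 * Note (added for clarity, on the assumption that struct fsl_desc_sw is
 * declared with a 32-byte alignment attribute in fsldma.h): the pool above
 * takes its alignment from __alignof__(struct fsl_desc_sw), so every
 * descriptor returned by dma_pool_alloc() satisfies the 32-byte rule.
 */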

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		chan_err(chan, "%s\n", msg_ld_oom);
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
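
/*
 * Client-side usage sketch (illustrative only; buffer DMA mapping and
 * error handling are omitted). A dmaengine client reaches the callback
 * above through the generic API roughly as follows:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_CTRL_ACK);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *			== DMA_IN_PROGRESS)
 *		cpu_relax();
 */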

static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "new link desc alloc %p\n", new);
#endif

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	/*
	 * This operation is not supported on the Freescale DMA controller.
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}

static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		/* Halt the DMA engine */
		dma_halt(chan);

		spin_lock_irqsave(&chan->desc_lock, flags);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);

		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_TO_DEVICE)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:

		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}
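
/*
 * Worked example (added for illustration) of the DMA_SLAVE_CONFIG burst
 * arithmetic above: with dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES (4)
 * and dst_maxburst = 8, the request count programmed into the controller
 * is 4 * 8 = 32 bytes per DREQ# assertion.
 */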

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @chan : Freescale DMA channel
 *
 * CONTEXT: hardirq
 */
static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	if (list_empty(&chan->ld_running)) {
		chan_dbg(chan, "no running descriptors\n");
		goto out_unlock;
	}

	/* Get the last descriptor, update the cookie to that */
	desc = to_fsl_desc(chan->ld_running.prev);
	if (dma_is_idle(chan))
		cookie = desc->async_tx.cookie;
	else {
		cookie = desc->async_tx.cookie - 1;
		if (unlikely(cookie < DMA_MIN_COOKIE))
			cookie = DMA_MAX_COOKIE;
	}

	chan->completed_cookie = cookie;

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsldma_desc_status - Check the status of a descriptor
 * @chan: Freescale DMA channel
 * @desc: DMA SW descriptor
 *
 * This function will return the status of the given descriptor
 */
static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
					  struct fsl_desc_sw *desc)
{
	return dma_async_is_complete(desc->async_tx.cookie,
				     chan->completed_cookie,
				     chan->common.cookie);
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @chan : Freescale DMA channel
 *
 * This function cleans up the ld_running queue of the DMA channel.
 */
static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	chan_dbg(chan, "chan completed_cookie = %d\n", chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
			break;

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->desc_lock, flags);
			chan_dbg(chan, "LD %p callback\n", desc);
			callback(callback_param);
			spin_lock_irqsave(&chan->desc_lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}

	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * This will make sure that any pending transactions will be run.
 * If the DMA controller is idle, it will be started. Otherwise,
 * the DMA controller's interrupt handler will start any pending
 * transactions when it becomes idle.
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		goto out_unlock;
	}

	/*
	 * The DMA controller is not idle, which means the interrupt
	 * handler will start any queued transactions when it runs
	 * at the end of the current transaction
	 */
	if (!dma_is_idle(chan)) {
		chan_dbg(chan, "DMA controller still busy\n");
		goto out_unlock;
	}

	/*
	 * TODO:
	 * make sure the dma_halt() function really un-wedges the
	 * controller as much as possible
	 */
	dma_halt(chan);

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	dma_start(chan);

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	fsl_chan_xfer_ld_queue(chan);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(chan);

	last_used = dchan->cookie;
	last_complete = chan->completed_cookie;

	dma_set_tx_state(txstate, last_complete, last_used, 0);

	return dma_async_is_complete(cookie, last_complete, last_used);
}
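
/*
 * Note (added for clarity): clients normally reach this through the
 * dmaengine helper, e.g.
 *
 *	status = dma_async_is_tx_complete(dchan, cookie, NULL, NULL);
 *
 * which invokes device_tx_status() and classifies the cookie as
 * DMA_SUCCESS, DMA_IN_PROGRESS or DMA_ERROR.
 */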

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	int update_cookie = 0;
	int xfer_ld_q = 0;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		if (get_bcr(chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
			 * Now, update the completed cookie, and continue the
			 * next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/*
	 * If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		chan_dbg(chan, "irq: End-of-segments INT\n");
		chan_dbg(chan, "irq: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(chan),
			(unsigned long long)get_ndar(chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/*
	 * For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/*
	 * If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to
	 * prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(chan);
	if (stat)
		chan_dbg(chan, "irq: unhandled sr 0x%08x\n", stat);

	chan_dbg(chan, "irq: Exit\n");
	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	fsl_chan_ld_cleanup(chan);
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}
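
/*
 * Note (added for clarity; behavior taken from the loop above): the general
 * status register packs one byte of status per channel, highest byte first.
 * Channel 0 is tested with mask 0xff000000, channel 1 with 0x00ff0000 after
 * the mask shifts right by 8, and so on, so a single shared controller IRQ
 * can service every channel.
 */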

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int __devinit fsldma_of_probe(struct platform_device *op,
			const struct of_device_id *match)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	dev_set_drvdata(&op->dev, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(fdev);

	return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&fsldma_of_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit fsldma_exit(void)
{
	of_unregister_platform_driver(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");