/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for Freescale MPC8540 DMA controller, which is
 *   also suitable for MPC8560, MPC8555, MPC8548, MPC8641, and so on.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "dmaengine.h"
#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */
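/*
 * Note: per the DMA_IN()/DMA_OUT() definitions in fsldma.h, these
 * accessors select big- or little-endian MMIO based on the channel's
 * FSL_DMA_BIG_ENDIAN / FSL_DMA_LITTLE_ENDIAN feature flag, so the
 * helpers below work on both the 85xx and 83xx controllers.
 */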

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	return DMA_TO_CPU(chan, desc->hw.count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static dma_addr_t get_desc_src(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = DMA_IN(chan, &chan->regs->mr, 32);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold transfer size. While the DMA transfers
 * data from the source address (SA), it loops within a window of that
 * size: if the loop size is 4, the DMA will read data from SA,
 * SA + 1, SA + 2, SA + 3, then loop back to SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold transfer size. While the DMA
 * transfers data to the destination address (TA), it loops within a
 * window of that size: if the loop size is 4, the DMA will write data
 * to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, TA + 1 ...
 * and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size     : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
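	/* the request count is encoded as log2(size), e.g. 1024 -> __ilog2(1024) = 10 */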
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel is started by an
 * external DMA start pin, so dma_start() does not start the
 * transfer immediately. The DMA channel waits for the
 * control pin to be asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
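/*
 * A minimal client-side sketch (not part of this driver): in this
 * era's dmaengine API, external start is toggled through
 * device_control() using the FSLDMA_EXTERNAL_START command from
 * enum dma_ctrl_cmd:
 *
 *	chan->device->device_control(chan, FSLDMA_EXTERNAL_START, 1);
 */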

static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The descriptor allocated. NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p allocated\n", desc);
#endif

	return desc;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32 bytes in order to
	 * meet the FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		chan_err(chan, "%s\n", msg_ld_oom);
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
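/*
 * A minimal client-side sketch (not part of this driver) of how the
 * memcpy path above is typically driven through the generic dmaengine
 * API; error handling is omitted and dst/src/len are assumed to be
 * DMA-mapped already:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */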

static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
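/*
 * A client-side sketch (not part of this driver) of driving the
 * scatterlist-to-scatterlist path above; both lists must already be
 * mapped for DMA by the caller:
 *
 *	tx = chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
 *					      src_sg, src_nents, 0);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */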

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags)
{
	/*
	 * This operation is not supported on the Freescale DMA controller
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}

static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&chan->desc_lock, flags);

		/* Halt the DMA engine */
		dma_halt(chan);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);
		chan->idle = true;

		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_MEM_TO_DEV)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:

		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}
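/*
 * A minimal client-side sketch (not part of this driver) of
 * programming the request count through the DMA_SLAVE_CONFIG path
 * above; the direction, width, and burst values are illustrative only:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */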

/**
 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, and then
 * free the descriptor.
 */
static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
				      struct fsl_desc_sw *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	struct device *dev = chan->common.device->dev;
	dma_addr_t src = get_desc_src(chan, desc);
	dma_addr_t dst = get_desc_dst(chan, desc);
	u32 len = get_desc_cnt(chan, desc);

	/* Run the link descriptor callback function */
	if (txd->callback) {
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p callback\n", desc);
#endif
		txd->callback(txd->callback_param);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	/* Unmap the dst buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
	}

	/* Unmap the src buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
	}

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p free\n", desc);
#endif
	dma_pool_free(chan->desc_pool, desc, txd->phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = DMA_IN(chan, &chan->regs->mr, 32);
		mode &= ~FSL_DMA_MR_CS;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	ret = dma_cookie_status(dchan, cookie, txstate);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return ret;
}
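/*
 * A client-side sketch (not part of this driver): cookie status is
 * usually polled through the generic helper, which ends up calling
 * fsl_tx_status() above (DMA_SUCCESS is this era's name for the
 * completed state):
 *
 *	if (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
 *	    DMA_SUCCESS)
 *		... transfer finished ...
 */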

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to
	 * prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	struct fsl_desc_sw *desc, *_desc;
	LIST_HEAD(ld_cleanup);
	unsigned long flags;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->ld_running)) {
		dma_cookie_t cookie;

		desc = to_fsl_desc(chan->ld_running.prev);
		cookie = desc->async_tx.cookie;
		dma_cookie_complete(&desc->async_tx);

		chan_dbg(chan, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->ld_running, &ld_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {

		/* Remove from the list of transactions */
		list_del(&desc->node);

		/* Run all cleanup for this descriptor */
		fsldma_cleanup_descriptor(chan, desc);
	}

	chan_dbg(chan, "tasklet exit\n");
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different from the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
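		/* the 85xx channels also use the 83xx callbacks: fall through */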
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	chan->idle = true;

	chan->common.device = &fdev->common;
	dma_cookie_init(&chan->common);

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int __devinit fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "Not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	dev_set_drvdata(&op->dev, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(fdev);

	return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	pr_info("Freescale Elo / Elo Plus DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");