fsldma.c 36.2 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12
/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for Freescale MPC8540 DMA controller, which is
 *   also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc.
 *   The support for MPC8349 DMA controller is also added.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
30
#include <linux/slab.h>
31 32 33 34 35 36 37
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

I
Ira Snyder 已提交
38
#include <asm/fsldma.h>
39 40
#include "fsldma.h"

I
Ira Snyder 已提交
41
static void dma_init(struct fsldma_chan *chan)
42 43
{
	/* Reset the channel */
I
Ira Snyder 已提交
44
	DMA_OUT(chan, &chan->regs->mr, 0, 32);
45

I
Ira Snyder 已提交
46
	switch (chan->feature & FSL_DMA_IP_MASK) {
47 48 49 50 51 52
	case FSL_DMA_IP_85XX:
		/* Set the channel to below modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
I
Ira Snyder 已提交
53
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE
54 55 56 57 58
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to below modes:
		 * EOTIE - End-of-transfer interrupt enable
59
		 * PRC_RM - PCI read multiple
60
		 */
I
Ira Snyder 已提交
61
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
62
				| FSL_DMA_MR_PRC_RM, 32);
63 64 65 66
		break;
	}
}

I
Ira Snyder 已提交
67
/* Write @val to the channel's status register (write-1-to-clear bits). */
static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

I
Ira Snyder 已提交
72
static u32 get_sr(struct fsldma_chan *chan)
73
{
I
Ira Snyder 已提交
74
	return DMA_IN(chan, &chan->regs->sr, 32);
75 76
}

I
Ira Snyder 已提交
77
/* Store the transfer byte count into a hardware link descriptor. */
static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

I
Ira Snyder 已提交
83
/*
 * Store the source address into a hardware link descriptor,
 * enabling read snooping on 85xx-class controllers.
 */
static void set_desc_src(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

I
Ira Snyder 已提交
93
/*
 * Store the destination address into a hardware link descriptor,
 * enabling write snooping on 85xx-class controllers.
 */
static void set_desc_dst(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

I
Ira Snyder 已提交
103
/*
 * Chain a hardware link descriptor to its successor; 83xx-class
 * controllers carry the snoop-enable bit in the next-link address.
 */
static void set_desc_next(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

I
Ira Snyder 已提交
113
/* Program the current descriptor address register (snoop enabled). */
static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

I
Ira Snyder 已提交
118
static dma_addr_t get_cdar(struct fsldma_chan *chan)
119
{
I
Ira Snyder 已提交
120
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
121 122
}

I
Ira Snyder 已提交
123
static dma_addr_t get_ndar(struct fsldma_chan *chan)
124
{
I
Ira Snyder 已提交
125
	return DMA_IN(chan, &chan->regs->ndar, 64);
126 127
}

I
Ira Snyder 已提交
128
static u32 get_bcr(struct fsldma_chan *chan)
129
{
I
Ira Snyder 已提交
130
	return DMA_IN(chan, &chan->regs->bcr, 32);
131 132
}

I
Ira Snyder 已提交
133
static int dma_is_idle(struct fsldma_chan *chan)
134
{
I
Ira Snyder 已提交
135
	u32 sr = get_sr(chan);
136 137 138
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

I
Ira Snyder 已提交
139
static void dma_start(struct fsldma_chan *chan)
140
{
I
Ira Snyder 已提交
141 142
	u32 mode;

I
Ira Snyder 已提交
143
	mode = DMA_IN(chan, &chan->regs->mr, 32);
I
Ira Snyder 已提交
144

I
Ira Snyder 已提交
145 146 147
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
			DMA_OUT(chan, &chan->regs->bcr, 0, 32);
I
Ira Snyder 已提交
148 149 150 151
			mode |= FSL_DMA_MR_EMP_EN;
		} else {
			mode &= ~FSL_DMA_MR_EMP_EN;
		}
152
	}
153

I
Ira Snyder 已提交
154
	if (chan->feature & FSL_DMA_CHAN_START_EXT)
I
Ira Snyder 已提交
155
		mode |= FSL_DMA_MR_EMS_EN;
156
	else
I
Ira Snyder 已提交
157
		mode |= FSL_DMA_MR_CS;
158

I
Ira Snyder 已提交
159
	DMA_OUT(chan, &chan->regs->mr, mode, 32);
160 161
}

I
Ira Snyder 已提交
162
static void dma_halt(struct fsldma_chan *chan)
163
{
I
Ira Snyder 已提交
164
	u32 mode;
165 166
	int i;

I
Ira Snyder 已提交
167
	mode = DMA_IN(chan, &chan->regs->mr, 32);
I
Ira Snyder 已提交
168
	mode |= FSL_DMA_MR_CA;
I
Ira Snyder 已提交
169
	DMA_OUT(chan, &chan->regs->mr, mode, 32);
I
Ira Snyder 已提交
170 171

	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
I
Ira Snyder 已提交
172
	DMA_OUT(chan, &chan->regs->mr, mode, 32);
173

174
	for (i = 0; i < 100; i++) {
I
Ira Snyder 已提交
175
		if (dma_is_idle(chan))
I
Ira Snyder 已提交
176 177
			return;

178
		udelay(10);
179
	}
I
Ira Snyder 已提交
180

I
Ira Snyder 已提交
181
	if (!dma_is_idle(chan))
I
Ira Snyder 已提交
182
		dev_err(chan->dev, "DMA halt timeout!\n");
183 184
}

I
Ira Snyder 已提交
185
static void set_ld_eol(struct fsldma_chan *chan,
186 187
			struct fsl_desc_sw *desc)
{
188 189
	u64 snoop_bits;

I
Ira Snyder 已提交
190
	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
191 192
		? FSL_DMA_SNEN : 0;

I
Ira Snyder 已提交
193 194
	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
195
			| snoop_bits, 64);
196 197 198 199
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
I
Ira Snyder 已提交
200
 * @chan : Freescale DMA channel
201 202 203 204 205 206 207 208
 * @size     : Address loop size, 0 for disable loop
 *
 * The set source address hold transfer size. The source
 * address hold or loop transfer size is when the DMA transfer
 * data from source address (SA), if the loop size is 4, the DMA will
 * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
 * SA + 1 ... and so on.
 */
I
Ira Snyder 已提交
209
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
210
{
I
Ira Snyder 已提交
211 212
	u32 mode;

I
Ira Snyder 已提交
213
	mode = DMA_IN(chan, &chan->regs->mr, 32);
I
Ira Snyder 已提交
214

215 216
	switch (size) {
	case 0:
I
Ira Snyder 已提交
217
		mode &= ~FSL_DMA_MR_SAHE;
218 219 220 221 222
		break;
	case 1:
	case 2:
	case 4:
	case 8:
I
Ira Snyder 已提交
223
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
224 225
		break;
	}
I
Ira Snyder 已提交
226

I
Ira Snyder 已提交
227
	DMA_OUT(chan, &chan->regs->mr, mode, 32);
228 229 230
}

/**
231
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
I
Ira Snyder 已提交
232
 * @chan : Freescale DMA channel
233 234 235 236 237 238 239 240
 * @size     : Address loop size, 0 for disable loop
 *
 * The set destination address hold transfer size. The destination
 * address hold or loop transfer size is when the DMA transfer
 * data to destination address (TA), if the loop size is 4, the DMA will
 * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
 * TA + 1 ... and so on.
 */
I
Ira Snyder 已提交
241
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
242
{
I
Ira Snyder 已提交
243 244
	u32 mode;

I
Ira Snyder 已提交
245
	mode = DMA_IN(chan, &chan->regs->mr, 32);
I
Ira Snyder 已提交
246

247 248
	switch (size) {
	case 0:
I
Ira Snyder 已提交
249
		mode &= ~FSL_DMA_MR_DAHE;
250 251 252 253 254
		break;
	case 1:
	case 2:
	case 4:
	case 8:
I
Ira Snyder 已提交
255
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
256 257
		break;
	}
I
Ira Snyder 已提交
258

I
Ira Snyder 已提交
259
	DMA_OUT(chan, &chan->regs->mr, mode, 32);
260 261 262
}

/**
263
 * fsl_chan_set_request_count - Set DMA Request Count for external control
I
Ira Snyder 已提交
264
 * @chan : Freescale DMA channel
265 266 267 268 269 270
 * @size     : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
271
 *
272
 * A size of 0 disables external pause control. The maximum size is 1024.
273
 */
I
Ira Snyder 已提交
274
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
275
{
I
Ira Snyder 已提交
276 277
	u32 mode;

278
	BUG_ON(size > 1024);
I
Ira Snyder 已提交
279

I
Ira Snyder 已提交
280
	mode = DMA_IN(chan, &chan->regs->mr, 32);
I
Ira Snyder 已提交
281 282
	mode |= (__ilog2(size) << 24) & 0x0f000000;

I
Ira Snyder 已提交
283
	DMA_OUT(chan, &chan->regs->mr, mode, 32);
284
}
285

286 287
/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
I
Ira Snyder 已提交
288
 * @chan : Freescale DMA channel
289 290 291 292 293 294
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
I
Ira Snyder 已提交
295
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
296 297
{
	if (enable)
I
Ira Snyder 已提交
298
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
299
	else
I
Ira Snyder 已提交
300
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
301 302 303 304
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
I
Ira Snyder 已提交
305
 * @chan : Freescale DMA channel
306 307 308 309 310 311 312
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If enable the external start, the channel can be started by an
 * external DMA start pin. So the dma_start() does not start the
 * transfer immediately. The DMA channel will wait for the
 * control pin asserted.
 */
I
Ira Snyder 已提交
313
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
314 315
{
	if (enable)
I
Ira Snyder 已提交
316
		chan->feature |= FSL_DMA_CHAN_START_EXT;
317
	else
I
Ira Snyder 已提交
318
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
319 320
}

I
Ira Snyder 已提交
321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345
/*
 * Append @desc (and its children) to the channel's pending queue,
 * hardware-chaining it after the current tail. Caller holds desc_lock.
 */
static void append_ld_queue(struct fsldma_chan *chan,
			    struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

346 347
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
I
Ira Snyder 已提交
348
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
349 350
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
351 352 353
	unsigned long flags;
	dma_cookie_t cookie;

I
Ira Snyder 已提交
354
	spin_lock_irqsave(&chan->desc_lock, flags);
355

I
Ira Snyder 已提交
356 357 358 359
	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
I
Ira Snyder 已提交
360
	cookie = chan->common.cookie;
361
	list_for_each_entry(child, &desc->tx_list, node) {
362 363 364 365
		cookie++;
		if (cookie < 0)
			cookie = 1;

S
Steven J. Magnani 已提交
366
		child->async_tx.cookie = cookie;
367 368
	}

I
Ira Snyder 已提交
369
	chan->common.cookie = cookie;
I
Ira Snyder 已提交
370 371

	/* put this transaction onto the tail of the pending queue */
I
Ira Snyder 已提交
372
	append_ld_queue(chan, desc);
373

I
Ira Snyder 已提交
374
	spin_unlock_irqrestore(&chan->desc_lock, flags);
375 376 377 378 379 380

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
I
Ira Snyder 已提交
381
 * @chan : Freescale DMA channel
382 383 384 385
 *
 * Return - The descriptor allocated. NULL for failed.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
I
Ira Snyder 已提交
386
					struct fsldma_chan *chan)
387
{
I
Ira Snyder 已提交
388
	struct fsl_desc_sw *desc;
389
	dma_addr_t pdesc;
I
Ira Snyder 已提交
390 391 392 393 394

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_dbg(chan->dev, "out of memory for link desc\n");
		return NULL;
395 396
	}

I
Ira Snyder 已提交
397 398 399 400 401 402 403
	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
404 405 406 407 408
}


/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
I
Ira Snyder 已提交
409
 * @chan : Freescale DMA channel
410 411 412 413 414
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
I
Ira Snyder 已提交
415
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
416
{
I
Ira Snyder 已提交
417
	struct fsldma_chan *chan = to_fsl_chan(dchan);
418 419

	/* Has this channel already been allocated? */
I
Ira Snyder 已提交
420
	if (chan->desc_pool)
421
		return 1;
422

I
Ira Snyder 已提交
423 424
	/*
	 * We need the descriptor to be aligned to 32bytes
425 426
	 * for meeting FSL DMA specification requirement.
	 */
I
Ira Snyder 已提交
427
	chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
I
Ira Snyder 已提交
428 429 430
					  chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
I
Ira Snyder 已提交
431
	if (!chan->desc_pool) {
I
Ira Snyder 已提交
432 433 434
		dev_err(chan->dev, "unable to allocate channel %d "
				   "descriptor pool\n", chan->id);
		return -ENOMEM;
435 436
	}

I
Ira Snyder 已提交
437
	/* there is at least one descriptor free to be allocated */
438 439 440
	return 1;
}

I
Ira Snyder 已提交
441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469
/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

/* As fsldma_free_desc_list(), but walks the list tail-to-head. */
static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

470 471
/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
I
Ira Snyder 已提交
472
 * @chan : Freescale DMA channel
473
 */
I
Ira Snyder 已提交
474
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
475
{
I
Ira Snyder 已提交
476
	struct fsldma_chan *chan = to_fsl_chan(dchan);
477 478
	unsigned long flags;

I
Ira Snyder 已提交
479 480
	dev_dbg(chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
I
Ira Snyder 已提交
481 482
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
I
Ira Snyder 已提交
483
	spin_unlock_irqrestore(&chan->desc_lock, flags);
484

I
Ira Snyder 已提交
485
	dma_pool_destroy(chan->desc_pool);
I
Ira Snyder 已提交
486
	chan->desc_pool = NULL;
487 488
}

489
static struct dma_async_tx_descriptor *
I
Ira Snyder 已提交
490
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
491
{
I
Ira Snyder 已提交
492
	struct fsldma_chan *chan;
493 494
	struct fsl_desc_sw *new;

I
Ira Snyder 已提交
495
	if (!dchan)
496 497
		return NULL;

I
Ira Snyder 已提交
498
	chan = to_fsl_chan(dchan);
499

I
Ira Snyder 已提交
500
	new = fsl_dma_alloc_descriptor(chan);
501
	if (!new) {
I
Ira Snyder 已提交
502
		dev_err(chan->dev, "No free memory for link descriptor\n");
503 504 505 506
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
507
	new->async_tx.flags = flags;
508

509
	/* Insert the link descriptor to the LD ring */
510
	list_add_tail(&new->node, &new->tx_list);
511

512
	/* Set End-of-link to the last link descriptor of new list*/
I
Ira Snyder 已提交
513
	set_ld_eol(chan, new);
514 515 516 517

	return &new->async_tx;
}

518
static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
I
Ira Snyder 已提交
519
	struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
520 521
	size_t len, unsigned long flags)
{
I
Ira Snyder 已提交
522
	struct fsldma_chan *chan;
523 524 525
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

I
Ira Snyder 已提交
526
	if (!dchan)
527 528 529 530 531
		return NULL;

	if (!len)
		return NULL;

I
Ira Snyder 已提交
532
	chan = to_fsl_chan(dchan);
533 534 535 536

	do {

		/* Allocate the link descriptor from DMA pool */
I
Ira Snyder 已提交
537
		new = fsl_dma_alloc_descriptor(chan);
538
		if (!new) {
I
Ira Snyder 已提交
539
			dev_err(chan->dev,
540
					"No free memory for link descriptor\n");
541
			goto fail;
542 543
		}
#ifdef FSL_DMA_LD_DEBUG
I
Ira Snyder 已提交
544
		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
545 546
#endif

547
		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
548

I
Ira Snyder 已提交
549 550 551
		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);
552 553 554 555

		if (!first)
			first = new;
		else
I
Ira Snyder 已提交
556
			set_desc_next(chan, &prev->hw, new->async_tx.phys);
557 558

		new->async_tx.cookie = 0;
559
		async_tx_ack(&new->async_tx);
560 561 562 563

		prev = new;
		len -= copy;
		dma_src += copy;
564
		dma_dst += copy;
565 566

		/* Insert the link descriptor to the LD ring */
567
		list_add_tail(&new->node, &first->tx_list);
568 569
	} while (len);

570
	new->async_tx.flags = flags; /* client is in control of this ack */
571 572 573
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list*/
I
Ira Snyder 已提交
574
	set_ld_eol(chan, new);
575

576 577 578 579 580 581
	return &first->async_tx;

fail:
	if (!first)
		return NULL;

I
Ira Snyder 已提交
582
	fsldma_free_desc_list_reverse(chan, &first->tx_list);
583
	return NULL;
584 585
}

I
Ira Snyder 已提交
586 587 588 589 590 591 592 593 594 595 596 597 598
/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
I
Ira Snyder 已提交
599
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
I
Ira Snyder 已提交
600 601
	enum dma_data_direction direction, unsigned long flags)
{
I
Ira Snyder 已提交
602
	struct fsldma_chan *chan;
I
Ira Snyder 已提交
603 604 605 606 607 608 609 610 611 612 613
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsl_dma_slave *slave;
	size_t copy;

	int i;
	struct scatterlist *sg;
	size_t sg_used;
	size_t hw_used;
	struct fsl_dma_hw_addr *hw;
	dma_addr_t dma_dst, dma_src;

I
Ira Snyder 已提交
614
	if (!dchan)
I
Ira Snyder 已提交
615 616
		return NULL;

I
Ira Snyder 已提交
617
	if (!dchan->private)
I
Ira Snyder 已提交
618 619
		return NULL;

I
Ira Snyder 已提交
620 621
	chan = to_fsl_chan(dchan);
	slave = dchan->private;
I
Ira Snyder 已提交
622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669

	if (list_empty(&slave->addresses))
		return NULL;

	hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
	hw_used = 0;

	/*
	 * Build the hardware transaction to copy from the scatterlist to
	 * the hardware, or from the hardware to the scatterlist
	 *
	 * If you are copying from the hardware to the scatterlist and it
	 * takes two hardware entries to fill an entire page, then both
	 * hardware entries will be coalesced into the same page
	 *
	 * If you are copying from the scatterlist to the hardware and a
	 * single page can fill two hardware entries, then the data will
	 * be read out of the page into the first hardware entry, and so on
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {

			/*
			 * If we've used up the current hardware address/length
			 * pair, we need to load a new one
			 *
			 * This is done in a while loop so that descriptors with
			 * length == 0 will be skipped
			 */
			while (hw_used >= hw->length) {

				/*
				 * If the current hardware entry is the last
				 * entry in the list, we're finished
				 */
				if (list_is_last(&hw->entry, &slave->addresses))
					goto finished;

				/* Get the next hardware address/length pair */
				hw = list_entry(hw->entry.next,
						struct fsl_dma_hw_addr, entry);
				hw_used = 0;
			}

			/* Allocate the link descriptor from DMA pool */
I
Ira Snyder 已提交
670
			new = fsl_dma_alloc_descriptor(chan);
I
Ira Snyder 已提交
671
			if (!new) {
I
Ira Snyder 已提交
672
				dev_err(chan->dev, "No free memory for "
I
Ira Snyder 已提交
673 674 675 676
						       "link descriptor\n");
				goto fail;
			}
#ifdef FSL_DMA_LD_DEBUG
I
Ira Snyder 已提交
677
			dev_dbg(chan->dev, "new link desc alloc %p\n", new);
I
Ira Snyder 已提交
678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703
#endif

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the DMA controller limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
					     hw->length - hw_used);
			copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);

			/*
			 * DMA_FROM_DEVICE
			 * from the hardware to the scatterlist
			 *
			 * DMA_TO_DEVICE
			 * from the scatterlist to the hardware
			 */
			if (direction == DMA_FROM_DEVICE) {
				dma_src = hw->address + hw_used;
				dma_dst = sg_dma_address(sg) + sg_used;
			} else {
				dma_src = sg_dma_address(sg) + sg_used;
				dma_dst = hw->address + hw_used;
			}

			/* Fill in the descriptor */
I
Ira Snyder 已提交
704 705 706
			set_desc_cnt(chan, &new->hw, copy);
			set_desc_src(chan, &new->hw, dma_src);
			set_desc_dst(chan, &new->hw, dma_dst);
I
Ira Snyder 已提交
707 708 709 710 711 712 713 714

			/*
			 * If this is not the first descriptor, chain the
			 * current descriptor after the previous descriptor
			 */
			if (!first) {
				first = new;
			} else {
I
Ira Snyder 已提交
715
				set_desc_next(chan, &prev->hw,
I
Ira Snyder 已提交
716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740
					      new->async_tx.phys);
			}

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);

			prev = new;
			sg_used += copy;
			hw_used += copy;

			/* Insert the link descriptor into the LD ring */
			list_add_tail(&new->node, &first->tx_list);
		}
	}

finished:

	/* All of the hardware address/length pairs had length == 0 */
	if (!first || !new)
		return NULL;

	new->async_tx.flags = flags;
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
I
Ira Snyder 已提交
741
	set_ld_eol(chan, new);
I
Ira Snyder 已提交
742 743

	/* Enable extra controller features */
I
Ira Snyder 已提交
744 745
	if (chan->set_src_loop_size)
		chan->set_src_loop_size(chan, slave->src_loop_size);
I
Ira Snyder 已提交
746

I
Ira Snyder 已提交
747 748
	if (chan->set_dst_loop_size)
		chan->set_dst_loop_size(chan, slave->dst_loop_size);
I
Ira Snyder 已提交
749

I
Ira Snyder 已提交
750 751
	if (chan->toggle_ext_start)
		chan->toggle_ext_start(chan, slave->external_start);
I
Ira Snyder 已提交
752

I
Ira Snyder 已提交
753 754
	if (chan->toggle_ext_pause)
		chan->toggle_ext_pause(chan, slave->external_pause);
I
Ira Snyder 已提交
755

I
Ira Snyder 已提交
756 757
	if (chan->set_request_count)
		chan->set_request_count(chan, slave->request_count);
I
Ira Snyder 已提交
758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773

	return &first->async_tx;

fail:
	/* If first was not set, then we failed to allocate the very first
	 * descriptor, and we're done */
	if (!first)
		return NULL;

	/*
	 * First is set, so all of the descriptors we allocated have been added
	 * to first->tx_list, INCLUDING "first" itself. Therefore we
	 * must traverse the list backwards freeing each descriptor in turn
	 *
	 * We're re-using variables for the loop, oh well
	 */
I
Ira Snyder 已提交
774
	fsldma_free_desc_list_reverse(chan, &first->tx_list);
I
Ira Snyder 已提交
775 776 777
	return NULL;
}

778
static int fsl_dma_device_control(struct dma_chan *dchan,
779
				  enum dma_ctrl_cmd cmd, unsigned long arg)
I
Ira Snyder 已提交
780
{
I
Ira Snyder 已提交
781
	struct fsldma_chan *chan;
I
Ira Snyder 已提交
782 783
	unsigned long flags;

784 785 786 787
	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

I
Ira Snyder 已提交
788
	if (!dchan)
789
		return -EINVAL;
I
Ira Snyder 已提交
790

I
Ira Snyder 已提交
791
	chan = to_fsl_chan(dchan);
I
Ira Snyder 已提交
792 793

	/* Halt the DMA engine */
I
Ira Snyder 已提交
794
	dma_halt(chan);
I
Ira Snyder 已提交
795

I
Ira Snyder 已提交
796
	spin_lock_irqsave(&chan->desc_lock, flags);
I
Ira Snyder 已提交
797 798

	/* Remove and free all of the descriptors in the LD queue */
I
Ira Snyder 已提交
799 800
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
I
Ira Snyder 已提交
801

I
Ira Snyder 已提交
802
	spin_unlock_irqrestore(&chan->desc_lock, flags);
803 804

	return 0;
I
Ira Snyder 已提交
805 806
}

807 808
/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
I
Ira Snyder 已提交
809
 * @chan : Freescale DMA channel
I
Ira Snyder 已提交
810 811
 *
 * CONTEXT: hardirq
812
 */
I
Ira Snyder 已提交
813
static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
814
{
I
Ira Snyder 已提交
815 816 817
	struct fsl_desc_sw *desc;
	unsigned long flags;
	dma_cookie_t cookie;
818

I
Ira Snyder 已提交
819
	spin_lock_irqsave(&chan->desc_lock, flags);
820

I
Ira Snyder 已提交
821 822 823
	if (list_empty(&chan->ld_running)) {
		dev_dbg(chan->dev, "no running descriptors\n");
		goto out_unlock;
824
	}
I
Ira Snyder 已提交
825 826 827 828 829

	/* Get the last descriptor, update the cookie to that */
	desc = to_fsl_desc(chan->ld_running.prev);
	if (dma_is_idle(chan))
		cookie = desc->async_tx.cookie;
S
Steven J. Magnani 已提交
830
	else {
I
Ira Snyder 已提交
831
		cookie = desc->async_tx.cookie - 1;
S
Steven J. Magnani 已提交
832 833 834
		if (unlikely(cookie < DMA_MIN_COOKIE))
			cookie = DMA_MAX_COOKIE;
	}
I
Ira Snyder 已提交
835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854

	chan->completed_cookie = cookie;

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsldma_desc_status - Check the status of a descriptor
 * @chan: Freescale DMA channel
 * @desc: DMA SW descriptor
 *
 * This function will return the status of the given descriptor
 */
static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
					  struct fsl_desc_sw *desc)
{
	return dma_async_is_complete(desc->async_tx.cookie,
				     chan->completed_cookie,
				     chan->common.cookie);
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
I
Ira Snyder 已提交
859
 * @chan : Freescale DMA channel
860 861 862
 *
 * This function clean up the ld_queue of DMA channel.
 */
I
Ira Snyder 已提交
863
static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
864 865 866 867
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

I
Ira Snyder 已提交
868
	spin_lock_irqsave(&chan->desc_lock, flags);
869

I
Ira Snyder 已提交
870 871
	dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
872 873 874
		dma_async_tx_callback callback;
		void *callback_param;

I
Ira Snyder 已提交
875
		if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
876 877
			break;

I
Ira Snyder 已提交
878
		/* Remove from the list of running transactions */
879 880 881
		list_del(&desc->node);

		/* Run the link descriptor callback function */
I
Ira Snyder 已提交
882 883
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
884
		if (callback) {
I
Ira Snyder 已提交
885
			spin_unlock_irqrestore(&chan->desc_lock, flags);
I
Ira Snyder 已提交
886
			dev_dbg(chan->dev, "LD %p callback\n", desc);
887
			callback(callback_param);
I
Ira Snyder 已提交
888
			spin_lock_irqsave(&chan->desc_lock, flags);
889
		}
I
Ira Snyder 已提交
890 891 892 893

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
894
	}
I
Ira Snyder 已提交
895

I
Ira Snyder 已提交
896
	spin_unlock_irqrestore(&chan->desc_lock, flags);
897 898 899
}

/**
I
Ira Snyder 已提交
900
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
I
Ira Snyder 已提交
901
 * @chan : Freescale DMA channel
I
Ira Snyder 已提交
902 903 904 905 906
 *
 * This will make sure that any pending transactions will be run.
 * If the DMA controller is idle, it will be started. Otherwise,
 * the DMA controller's interrupt handler will start any pending
 * transactions when it becomes idle.
907
 */
I
Ira Snyder 已提交
908
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
909
{
I
Ira Snyder 已提交
910
	struct fsl_desc_sw *desc;
911 912
	unsigned long flags;

I
Ira Snyder 已提交
913
	spin_lock_irqsave(&chan->desc_lock, flags);
914

I
Ira Snyder 已提交
915 916 917 918 919 920
	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		dev_dbg(chan->dev, "no pending LDs\n");
921
		goto out_unlock;
I
Ira Snyder 已提交
922
	}
923

I
Ira Snyder 已提交
924 925 926 927 928 929 930 931 932 933 934 935 936 937 938
	/*
	 * The DMA controller is not idle, which means the interrupt
	 * handler will start any queued transactions when it runs
	 * at the end of the current transaction
	 */
	if (!dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		goto out_unlock;
	}

	/*
	 * TODO:
	 * make sure the dma_halt() function really un-wedges the
	 * controller as much as possible
	 */
I
Ira Snyder 已提交
939
	dma_halt(chan);
940

I
Ira Snyder 已提交
941 942 943
	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
944 945
	 */

I
Ira Snyder 已提交
946 947 948 949 950 951 952 953 954 955 956 957 958
	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	dma_start(chan);
959 960

out_unlock:
I
Ira Snyder 已提交
961
	spin_unlock_irqrestore(&chan->desc_lock, flags);
962 963 964 965
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	fsl_chan_xfer_ld_queue(chan);
}

/**
975
 * fsl_tx_status - Determine the DMA status
I
Ira Snyder 已提交
976
 * @chan : Freescale DMA channel
977
 */
978
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
979
					dma_cookie_t cookie,
980
					struct dma_tx_state *txstate)
981
{
I
Ira Snyder 已提交
982
	struct fsldma_chan *chan = to_fsl_chan(dchan);
983 984 985
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

I
Ira Snyder 已提交
986
	fsl_chan_ld_cleanup(chan);
987

I
Ira Snyder 已提交
988 989
	last_used = dchan->cookie;
	last_complete = chan->completed_cookie;
990

991
	dma_set_tx_state(txstate, last_complete, last_used, 0);
992 993 994 995

	return dma_async_is_complete(cookie, last_complete, last_used);
}

996 997 998 999
/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

1000
/*
 * fsldma_chan_irq - per-channel interrupt handler
 *
 * Decodes the channel status register, schedules descriptor cleanup via
 * the tasklet, and restarts the pending queue when the hardware reports
 * the end of a chain/link.
 */
static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	int update_cookie = 0;
	int xfer_ld_q = 0;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);

	/* CB (channel busy) and CH (channel halted) are status-only bits */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(chan->dev, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(chan->dev, "irq: Programming Error INT\n");
		if (get_bcr(chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
			 * Now, update the completed cookie, and continue the
			 * next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/*
	 * If a link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(chan->dev, "irq: End-of-segments INT\n");
		dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(chan),
			(unsigned long long)get_ndar(chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/*
	 * For MPC8349, an EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/*
	 * If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to
	 * prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(chan->dev, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(chan);
	/* any bits still set were not recognized above */
	if (stat)
		dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);

	dev_dbg(chan->dev, "irq: Exit\n");
	/* defer descriptor cleanup to the tasklet (dma_do_tasklet) */
	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

1084 1085
/* Deferred work scheduled from fsldma_chan_irq(): reap finished descriptors */
static void dma_do_tasklet(unsigned long data)
{
	fsl_chan_ld_cleanup((struct fsldma_chan *)data);
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
1091
{
1092
	struct fsldma_device *fdev = data;
1093 1094 1095 1096
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;
1097

1098
	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
1099 1100 1101
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);
1102

1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
1119 1120
}

1121
static void fsldma_free_irqs(struct fsldma_device *fdev)
1122
{
1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
			free_irq(chan->irq, chan);
		}
	}
}

/*
 * fsldma_request_irqs - hook up interrupt handlers for the controller
 * @fdev: the DMA controller
 *
 * Prefers a single per-controller IRQ; falls back to one IRQ per channel.
 * Returns 0 on success or a negative errno; on failure any IRQs that were
 * already requested are released again.
 */
static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			dev_err(fdev->dev, "no interrupts property defined for "
					   "DMA channel %d. Please fix your "
					   "device tree\n", chan->id);
			ret = -ENODEV;
			goto out_unwind;
		}

		dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			dev_err(fdev->dev, "unable to request IRQ for DMA "
					   "channel %d\n", chan->id);
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	/*
	 * Channel i never had its IRQ successfully requested (either the
	 * mapping was missing or request_irq() just failed), so start the
	 * unwind at the previous channel: calling free_irq() on an IRQ
	 * that was never requested is a bug.
	 */
	for (i = i - 1; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

1196 1197 1198 1199 1200
/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

/*
 * fsl_dma_chan_probe - set up a single DMA channel from the device tree
 * @fdev: parent DMA controller
 * @node: the channel's device tree node
 * @feature: FSL_DMA_IP_* and endianness flags for this channel type
 * @compatible: compatible string, used only for the probe banner
 *
 * Allocates and initializes the channel, registers it with the parent
 * controller and the dmaengine core. Returns 0 or a negative errno.
 */
static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	/* the 'reg' property also determines the channel id below */
	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	/* the first probed channel defines the controller's feature set */
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	/* channel register blocks start at offset 0x100, 0x80 bytes apart */
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fallthrough: 85xx channels also get all 83xx callbacks */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

I
Ira Snyder 已提交
1292
/*
 * fsl_dma_chan_remove - tear down a channel set up by fsl_dma_chan_probe()
 * @chan: channel to destroy
 *
 * Disposes the IRQ mapping, unlinks the channel from the dmaengine
 * channel list, unmaps its registers and frees the channel structure.
 * Callers must have already freed the channel's IRQ handler.
 */
static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

1300
/*
 * fsldma_of_probe - OpenFirmware probe for the DMA controller node
 *
 * Allocates the controller structure, maps its registers, instantiates
 * every child channel node, hooks up the IRQ handler(s) and registers
 * with the dmaengine core. Returns 0 or a negative errno.
 *
 * Fix vs. previous version: a failure in fsldma_request_irqs() used to
 * jump straight to out_free_fdev, leaking every probed channel (register
 * mappings, tasklets, list entries) and the controller register mapping.
 * The error path now tears the channels down and unmaps the registers.
 */
static int __devinit fsldma_of_probe(struct of_device *op,
			const struct of_device_id *match)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int i;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "No enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dev_set_drvdata(&op->dev, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_chans;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_chans:
	/* tear down every channel that was successfully probed above */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}
	iounmap(fdev->regs);
out_free_fdev:
	/* irq_dispose_mapping() is a no-op for an unmapped (zero) IRQ */
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}

1385
/*
 * fsldma_of_remove - undo fsldma_of_probe()
 *
 * Teardown order matters: unregister from the dmaengine core first,
 * then detach the interrupt handlers before destroying the channels
 * they reference.
 */
static int fsldma_of_remove(struct of_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&fdev->common);

	/* no handler may run once the channels start disappearing */
	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(fdev);

	return 0;
}

1407
/* device tree "compatible" strings matched by this driver */
static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

1413
/* OpenFirmware platform driver binding for the Elo / Elo Plus DMA blocks */
static struct of_platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

1423 1424 1425 1426 1427
/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
1428
{
1429 1430 1431 1432
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

1433
	ret = of_register_platform_driver(&fsldma_of_driver);
1434 1435 1436 1437 1438 1439
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

1440
/* Module exit point: unregister the OF platform driver */
static void __exit fsldma_exit(void)
{
	of_unregister_platform_driver(&fsldma_of_driver);
}

1445 1446
subsys_initcall(fsldma_init);
module_exit(fsldma_exit);
1447 1448 1449

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");