/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which also
 *   fits the MPC8560, MPC8555, MPC8548, MPC8641, and so on.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"

static const char msg_ld_oom[] = "No free memory for link descriptor\n";

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE
				| FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static dma_addr_t get_ndar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->ndar, 64);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
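	/*
	 * Descriptive note: the channel is treated as idle when the status
	 * register shows it is no longer busy (FSL_DMA_SR_CB clear) or it
	 * has halted (FSL_DMA_SR_CH set); dma_halt() below polls this
	 * after requesting a channel abort.
	 */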
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
			DMA_OUT(chan, &chan->regs->bcr, 0, 32);
			mode |= FSL_DMA_MR_EMP_EN;
		} else {
			mode &= ~FSL_DMA_MR_EMP_EN;
		}
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT)
		mode |= FSL_DMA_MR_EMS_EN;
	else
		mode |= FSL_DMA_MR_CS;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= FSL_DMA_MR_CA;
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		dev_err(chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsldma_chan *chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While the DMA
 * transfers data from the source address (SA), a loop size of 4 makes
 * the DMA read data from SA, SA + 1, SA + 2, SA + 3, then loop back to
 * SA, SA + 1, and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While the DMA
 * transfers data to the destination address (TA), a loop size of 4 makes
 * the DMA write data to TA, TA + 1, TA + 2, TA + 3, then loop back to
 * TA, TA + 1, and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size     : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;
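	/*
	 * Worked example (illustrative): a request count of 64 bytes gives
	 * __ilog2(64) = 6, so bits 27:24 of the mode register become 0x6
	 * (mode |= 0x06000000), i.e. the channel pauses after 2^6 = 64
	 * bytes until DREQ# is asserted again.
	 */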

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * If external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not start the
 * transfer immediately. The DMA channel will wait until the
 * control pin is asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static void append_ld_queue(struct fsldma_chan *chan,
			    struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}
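
/*
 * Illustrative sketch (hypothetical descriptors) of the list layout that
 * append_ld_queue() maintains. With a transaction A->B->C already pending
 * (C carrying EOL from set_ld_eol()) and a new transaction D->E appended:
 *
 *	before:	A -> B -> C(EOL)
 *	after:	A -> B -> C -> D -> E(EOL)
 *
 * set_desc_next() rewrites C's hardware link to point at D, un-setting C's
 * EOL, while E keeps the EOL that was set when its transaction was prepared.
 */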

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	cookie = chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		child->async_tx.cookie = cookie;
	}

	chan->common.cookie = cookie;

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The allocated descriptor. NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_dbg(chan->dev, "out of memory for link desc\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}


/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32 bytes
	 * to meet the FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
					  chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate channel %d "
				   "descriptor pool\n", chan->id);
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		dev_err(chan->dev, msg_ld_oom);
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor into the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, msg_ld_oom);
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
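
/*
 * Client-side sketch (illustrative only, not part of this driver): how a
 * dmaengine consumer would exercise fsl_dma_prep_memcpy() through the
 * generic API. The helper name is hypothetical; device_prep_dma_memcpy,
 * tx_submit and dma_async_issue_pending are the standard dmaengine entry
 * points this driver plugs into.
 */
#if 0
static dma_cookie_t example_issue_memcpy(struct dma_chan *chan,
					 dma_addr_t dst, dma_addr_t src,
					 size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* build the hardware descriptor chain (fsl_dma_prep_memcpy) */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -ENOMEM;

	/* assign a cookie and append to the channel's pending queue */
	cookie = tx->tx_submit(tx);

	/* start the controller if it is idle (fsl_dma_memcpy_issue_pending) */
	dma_async_issue_pending(chan);

	return cookie;
}
#endif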

static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, msg_ld_oom);
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link on the last link descriptor of the new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}
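
/*
 * Note: fsl_dma_prep_sg() reads the scatterlists with sg_dma_address() and
 * sg_dma_len(), so the caller is expected to have mapped both lists with
 * dma_map_sg() beforehand.
 */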

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	/*
	 * This operation is not supported on the Freescale DMA controller
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}

static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		/* Halt the DMA engine */
		dma_halt(chan);

		spin_lock_irqsave(&chan->desc_lock, flags);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);

		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_TO_DEVICE)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:

		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @chan : Freescale DMA channel
 *
 * CONTEXT: hardirq
 */
static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	if (list_empty(&chan->ld_running)) {
		dev_dbg(chan->dev, "no running descriptors\n");
		goto out_unlock;
	}

	/* Get the last descriptor, update the cookie to that */
	desc = to_fsl_desc(chan->ld_running.prev);
	if (dma_is_idle(chan))
		cookie = desc->async_tx.cookie;
	else {
		cookie = desc->async_tx.cookie - 1;
		if (unlikely(cookie < DMA_MIN_COOKIE))
			cookie = DMA_MAX_COOKIE;
	}

	chan->completed_cookie = cookie;

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsldma_desc_status - Check the status of a descriptor
 * @chan: Freescale DMA channel
 * @desc: DMA SW descriptor
 *
 * This function will return the status of the given descriptor
 */
static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
					  struct fsl_desc_sw *desc)
{
	return dma_async_is_complete(desc->async_tx.cookie,
				     chan->completed_cookie,
				     chan->common.cookie);
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel.
 */
static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
			break;

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock_irqrestore(&chan->desc_lock, flags);
			dev_dbg(chan->dev, "LD %p callback\n", desc);
			callback(callback_param);
			spin_lock_irqsave(&chan->desc_lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}

	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * This will make sure that any pending transactions will be run.
 * If the DMA controller is idle, it will be started. Otherwise,
 * the DMA controller's interrupt handler will start any pending
 * transactions when it becomes idle.
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		dev_dbg(chan->dev, "no pending LDs\n");
		goto out_unlock;
	}

	/*
	 * The DMA controller is not idle, which means the interrupt
	 * handler will start any queued transactions when it runs
	 * at the end of the current transaction
	 */
	if (!dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		goto out_unlock;
	}

	/*
	 * TODO:
	 * make sure the dma_halt() function really un-wedges the
	 * controller as much as possible
	 */
	dma_halt(chan);

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	dma_start(chan);

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	fsl_chan_xfer_ld_queue(chan);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(chan);

	last_used = dchan->cookie;
	last_complete = chan->completed_cookie;

	dma_set_tx_state(txstate, last_complete, last_used, 0);

	return dma_async_is_complete(cookie, last_complete, last_used);
}

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	int update_cookie = 0;
	int xfer_ld_q = 0;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(chan->dev, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(chan->dev, "irq: Programming Error INT\n");
		if (get_bcr(chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
			 * Now, update the completed cookie, and continue the
			 * next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/*
	 * If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(chan->dev, "irq: End-of-segments INT\n");
		dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(chan),
			(unsigned long long)get_ndar(chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/*
	 * For the MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/*
	 * If the current transfer is the end of transfer,
	 * we should clear the Channel Start bit to
	 * prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(chan->dev, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(chan);
	if (stat)
		dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);

	dev_dbg(chan->dev, "irq: Exit\n");
	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	fsl_chan_ld_cleanup(chan);
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			dev_err(fdev->dev, "no interrupts property defined for "
					   "DMA channel %d. Please fix your "
					   "device tree\n", chan->id);
			ret = -ENODEV;
			goto out_unwind;
		}

		dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			dev_err(fdev->dev, "unable to request IRQ for DMA "
					   "channel %d\n", chan->id);
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
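	/*
	 * Descriptive note: the channel index comes straight from the
	 * register offset. Channels sit at 0x80-byte strides starting
	 * 0x100 into the controller's register block, so the expression
	 * above maps offset 0x100 -> id 0, 0x180 -> 1, 0x200 -> 2, etc.
	 */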
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int __devinit fsldma_of_probe(struct platform_device *op,
			const struct of_device_id *match)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "Not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hook up the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	dev_set_drvdata(&op->dev, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(fdev);

	return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&fsldma_of_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit fsldma_exit(void)
{
	of_unregister_platform_driver(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");
MODULE_LICENSE("GPL");