/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for Freescale MPC8540 DMA controller, which is
 *   also suitable for MPC8560, MPC8555, MPC8548, MPC8641, etc.
 *   Support for the MPC8349 DMA controller is also added.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include <asm/fsldma.h>
#include "fsldma.h"

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->ndar, 64);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);

	/* Idle when the channel busy bit (CB) is clear or the channel
	 * has halted (CH).
	 */
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
			DMA_OUT(chan, &chan->regs->bcr, 0, 32);
			mode |= FSL_DMA_MR_EMP_EN;
		} else {
			mode &= ~FSL_DMA_MR_EMP_EN;
		}
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT)
		mode |= FSL_DMA_MR_EMS_EN;
	else
		mode |= FSL_DMA_MR_CS;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= FSL_DMA_MR_CA;
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			break;
		udelay(10);
	}

	if (i >= 100 && !dma_is_idle(chan))
		dev_err(chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsldma_chan *chan,
			struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

static void append_ld_queue(struct fsldma_chan *chan,
		struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(chan->ld_queue.prev);

	if (list_empty(&chan->ld_queue))
		return;

	/* Link the last link descriptor already in the queue (the
	 * previous node's next link descriptor) to the new descriptor's
	 * physical address, and enable the End-of-segment interrupt on it.
	 *
	 * For FSL_DMA_IP_83XX, the snoop enable bit needs to be set.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(chan,
			new_desc->async_tx.phys | FSL_DMA_EOSIE |
			(((chan->feature & FSL_DMA_IP_MASK)
				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While the loop is
 * active, the DMA engine reads from a fixed window of addresses: if the
 * loop size is 4, it reads data from SA, SA + 1, SA + 2, SA + 3, then
 * loops back to SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While the loop
 * is active, the DMA engine writes to a fixed window of addresses: if
 * the loop size is 4, it writes data to TA, TA + 1, TA + 2, TA + 3, then
 * loops back to TA, TA + 1 ... and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
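
/*
 * Illustrative sketch (values are hypothetical): for a device FIFO
 * exposed as an 8-byte register window, the source address would be
 * held while the destination advances normally:
 *
 *	fsl_chan_set_src_loop_size(chan, 8);
 *	fsl_chan_set_dst_loop_size(chan, 0);
 *
 * Sizes other than 0, 1, 2, 4 and 8 fall through both switch statements
 * above and leave the mode register unchanged.
 */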

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is how many bytes are allowed to transfer before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
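
/*
 * For example (hypothetical value), to allow 64 bytes per DREQ#
 * assertion:
 *
 *	fsl_chan_set_request_count(chan, 64);
 *
 * __ilog2(64) == 6, so the 4-bit field at bits 24-27 of the mode
 * register is set to 6.
 */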

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * When external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not begin the transfer
 * immediately. The DMA channel waits until the control pin is asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
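
/*
 * Illustrative sketch (hypothetical board setup): pacing a channel from
 * the DREQ# pin, 256 bytes per request, with the transfer kicked off by
 * the external start pin:
 *
 *	fsl_chan_set_request_count(chan, 256);
 *	fsl_chan_toggle_ext_pause(chan, 1);
 *	fsl_chan_toggle_ext_start(chan, 1);
 */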

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	/* cookie increment and adding to ld_queue must be atomic */
	spin_lock_irqsave(&chan->desc_lock, flags);

	cookie = chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < 0)
			cookie = 1;

		/* each descriptor in the chain gets its own cookie */
		child->async_tx.cookie = cookie;
	}

	chan->common.cookie = cookie;
	append_ld_queue(chan, desc);
	list_splice_init(&desc->tx_list, chan->ld_queue.prev);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate a descriptor from the channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The allocated descriptor. NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
					struct fsldma_chan *chan)
{
	dma_addr_t pdesc;
	struct fsl_desc_sw *desc_sw;

	desc_sw = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (desc_sw) {
		memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
		INIT_LIST_HEAD(&desc_sw->tx_list);
		dma_async_tx_descriptor_init(&desc_sw->async_tx,
						&chan->common);
		desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
		desc_sw->async_tx.phys = pdesc;
	}

	return desc_sw;
}


/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/* The descriptor must be aligned to 32 bytes to meet the FSL DMA
	 * specification requirement.
	 */
	chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
			chan->dev, sizeof(struct fsl_desc_sw),
			32, 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "No memory for channel %d "
			"descriptor dma pool.\n", chan->id);
		return 0;
	}

	return 1;
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) {
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(chan->dev,
				"LD %p will be released.\n", desc);
#endif
		list_del(&desc->node);
		/* free link descriptor */
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
	spin_unlock_irqrestore(&chan->desc_lock, flags);
	dma_pool_destroy(chan->desc_pool);

	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		dev_err(chan->dev, "No free memory for link descriptor\n");
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	struct list_head *list;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev,
					"No free memory for link descriptor\n");
			goto fail;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	list = &first->tx_list;
	list_for_each_entry_safe_reverse(new, prev, list, node) {
		list_del(&new->node);
		dma_pool_free(chan->desc_pool, new, new->async_tx.phys);
	}

	return NULL;
}
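
/*
 * Illustrative dmaengine-client call sequence (hypothetical caller, not
 * part of this driver), using the generic operations this driver hooks
 * up in fsldma_of_probe():
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	cookie = tx->tx_submit(tx);
 *	chan->device->device_issue_pending(chan);
 */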

/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsl_dma_slave *slave;
	struct list_head *tx_list;
	size_t copy;

	int i;
	struct scatterlist *sg;
	size_t sg_used;
	size_t hw_used;
	struct fsl_dma_hw_addr *hw;
	dma_addr_t dma_dst, dma_src;

	if (!dchan)
		return NULL;

	if (!dchan->private)
		return NULL;

	chan = to_fsl_chan(dchan);
	slave = dchan->private;

	if (list_empty(&slave->addresses))
		return NULL;

	hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
	hw_used = 0;

	/*
	 * Build the hardware transaction to copy from the scatterlist to
	 * the hardware, or from the hardware to the scatterlist
	 *
	 * If you are copying from the hardware to the scatterlist and it
	 * takes two hardware entries to fill an entire page, then both
	 * hardware entries will be coalesced into the same page
	 *
	 * If you are copying from the scatterlist to the hardware and a
	 * single page can fill two hardware entries, then the data will
	 * be read out of the page into the first hardware entry, and so on
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {

			/*
			 * If we've used up the current hardware address/length
			 * pair, we need to load a new one
			 *
			 * This is done in a while loop so that descriptors with
			 * length == 0 will be skipped
			 */
			while (hw_used >= hw->length) {

				/*
				 * If the current hardware entry is the last
				 * entry in the list, we're finished
				 */
				if (list_is_last(&hw->entry, &slave->addresses))
					goto finished;

				/* Get the next hardware address/length pair */
				hw = list_entry(hw->entry.next,
						struct fsl_dma_hw_addr, entry);
				hw_used = 0;
			}

			/* Allocate the link descriptor from DMA pool */
			new = fsl_dma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "No free memory for "
						       "link descriptor\n");
				goto fail;
			}
#ifdef FSL_DMA_LD_DEBUG
			dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the DMA controller limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
					     hw->length - hw_used);
			copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);

			/*
			 * DMA_FROM_DEVICE
			 * from the hardware to the scatterlist
			 *
			 * DMA_TO_DEVICE
			 * from the scatterlist to the hardware
			 */
			if (direction == DMA_FROM_DEVICE) {
				dma_src = hw->address + hw_used;
				dma_dst = sg_dma_address(sg) + sg_used;
			} else {
				dma_src = sg_dma_address(sg) + sg_used;
				dma_dst = hw->address + hw_used;
			}

			/* Fill in the descriptor */
			set_desc_cnt(chan, &new->hw, copy);
			set_desc_src(chan, &new->hw, dma_src);
			set_desc_dst(chan, &new->hw, dma_dst);

			/*
			 * If this is not the first descriptor, chain the
			 * current descriptor after the previous descriptor
			 */
			if (!first) {
				first = new;
			} else {
				set_desc_next(chan, &prev->hw,
					      new->async_tx.phys);
			}

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);

			prev = new;
			sg_used += copy;
			hw_used += copy;

			/* Insert the link descriptor into the LD ring */
			list_add_tail(&new->node, &first->tx_list);
		}
	}

finished:

	/* All of the hardware address/length pairs had length == 0 */
	if (!first || !new)
		return NULL;

	new->async_tx.flags = flags;
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	/* Enable extra controller features */
	if (chan->set_src_loop_size)
		chan->set_src_loop_size(chan, slave->src_loop_size);

	if (chan->set_dst_loop_size)
		chan->set_dst_loop_size(chan, slave->dst_loop_size);

	if (chan->toggle_ext_start)
		chan->toggle_ext_start(chan, slave->external_start);

	if (chan->toggle_ext_pause)
		chan->toggle_ext_pause(chan, slave->external_pause);

	if (chan->set_request_count)
		chan->set_request_count(chan, slave->request_count);

	return &first->async_tx;

fail:
	/* If first was not set, then we failed to allocate the very first
	 * descriptor, and we're done */
	if (!first)
		return NULL;

	/*
	 * First is set, so all of the descriptors we allocated have been added
	 * to first->tx_list, INCLUDING "first" itself. Therefore we
	 * must traverse the list backwards freeing each descriptor in turn
	 *
	 * We're re-using variables for the loop, oh well
	 */
	tx_list = &first->tx_list;
	list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
		list_del_init(&new->node);
		dma_pool_free(chan->desc_pool, new, new->async_tx.phys);
	}

	return NULL;
}
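
/*
 * Illustrative sketch (hypothetical client): a DMA_SLAVE user passes a
 * struct fsl_dma_slave through dchan->private before calling
 * device_prep_slave_sg(). Based on the fields used above, a minimal
 * setup might look like this (fifo_phys is a made-up device address):
 *
 *	struct fsl_dma_slave slave = { .request_count = 64 };
 *	struct fsl_dma_hw_addr hw = { .address = fifo_phys, .length = 512 };
 *
 *	INIT_LIST_HEAD(&slave.addresses);
 *	list_add_tail(&hw.entry, &slave.addresses);
 *	dchan->private = &slave;
 */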

static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *desc, *tmp;
	unsigned long flags;

	if (!dchan)
		return;

	chan = to_fsl_chan(dchan);

	/* Halt the DMA engine */
	dma_halt(chan);

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* Remove and free all of the descriptors in the LD queue */
	list_for_each_entry_safe(desc, tmp, &chan->ld_queue, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}

	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(chan))
				chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}

/**
 * fsl_chan_ld_cleanup - Clean up link descriptors
 * @chan : Freescale DMA channel
 *
 * This function cleans up the ld_queue of the DMA channel: it runs the
 * callback of each completed link descriptor and then frees it.
 */
static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	dev_dbg(chan->dev, "chan completed_cookie = %d\n",
			chan->completed_cookie);
	list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		if (dma_async_is_complete(desc->async_tx.cookie,
			    chan->completed_cookie, chan->common.cookie)
				== DMA_IN_PROGRESS)
			break;

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;

		/* Remove from ld_queue list */
		list_del(&desc->node);

		dev_dbg(chan->dev, "link descriptor %p will be recycled.\n",
				desc);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);

		/* Run the link descriptor callback function */
		if (callback) {
			spin_unlock_irqrestore(&chan->desc_lock, flags);
			dev_dbg(chan->dev, "link descriptor %p callback\n",
					desc);
			callback(callback_param);
			spin_lock_irqsave(&chan->desc_lock, flags);
		}
	}
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
 * @chan : Freescale DMA channel
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct list_head *ld_node;
	dma_addr_t next_dst_addr;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	if (!dma_is_idle(chan))
		goto out_unlock;

	dma_halt(chan);

	/* If there are link descriptors in the queue that have not been
	 * transferred yet, we need to start the transfer.
	 */

	/* Find the first untransferred descriptor */
	for (ld_node = chan->ld_queue.next;
		(ld_node != &chan->ld_queue)
			&& (dma_async_is_complete(
				to_fsl_desc(ld_node)->async_tx.cookie,
				chan->completed_cookie,
				chan->common.cookie) == DMA_SUCCESS);
		ld_node = ld_node->next);

	if (ld_node != &chan->ld_queue) {
		/* Get the ld start address from ld_queue */
		next_dst_addr = to_fsl_desc(ld_node)->async_tx.phys;
		dev_dbg(chan->dev, "xfer LDs starting from 0x%llx\n",
				(unsigned long long)next_dst_addr);
		set_cdar(chan, next_dst_addr);
		dma_start(chan);
	} else {
		set_cdar(chan, 0);
		set_ndar(chan, 0);
	}

out_unlock:
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

#ifdef FSL_DMA_LD_DEBUG
	struct fsl_desc_sw *ld;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	if (list_empty(&chan->ld_queue)) {
		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return;
	}

	dev_dbg(chan->dev, "--memcpy issue--\n");
	list_for_each_entry(ld, &chan->ld_queue, node) {
		int i;
		dev_dbg(chan->dev, "Ch %d, LD %08x\n",
				chan->id, ld->async_tx.phys);
		for (i = 0; i < 8; i++)
			dev_dbg(chan->dev, "LD offset %d: %08x\n",
					i, *(((u32 *)&ld->hw) + i));
	}
	dev_dbg(chan->dev, "----------------\n");
	spin_unlock_irqrestore(&chan->desc_lock, flags);
#endif

	fsl_chan_xfer_ld_queue(chan);
}

/**
 * fsl_dma_is_complete - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
					dma_cookie_t cookie,
					dma_cookie_t *done,
					dma_cookie_t *used)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	fsl_chan_ld_cleanup(chan);

	last_used = dchan->cookie;
	last_complete = chan->completed_cookie;

	if (done)
		*done = last_complete;

	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
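
/*
 * Clients normally poll completion through the generic dmaengine helper
 * (hypothetical usage, not part of this driver):
 *
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *			== DMA_IN_PROGRESS)
 *		cpu_relax();
 */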

/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	int update_cookie = 0;
	int xfer_ld_q = 0;
	u32 stat;

	stat = get_sr(chan);
	dev_dbg(chan->dev, "event: channel %d, stat = 0x%x\n",
						chan->id, stat);
	set_sr(chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(chan->dev, "event: Programming Error INT\n");
		if (get_bcr(chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
			 * Now, update the completed cookie, and continue the
			 * next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(chan->dev, "event: End-of-segments INT\n");
		dev_dbg(chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
			(unsigned long long)get_cdar(chan),
			(unsigned long long)get_ndar(chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/* For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(chan->dev, "event: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to prepare
	 * for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(chan);
	if (stat)
		dev_dbg(chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(chan->dev, "event: Exit\n");
	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	fsl_chan_ld_cleanup(chan);
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);

	/* The general status register reports 8 bits of status per channel,
	 * with channel 0 in the most significant byte.
	 */
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			dev_err(fdev->dev, "no interrupts property defined for "
					   "DMA channel %d. Please fix your "
					   "device tree\n", chan->id);
			ret = -ENODEV;
			goto out_unwind;
		}

		dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			dev_err(fdev->dev, "unable to request IRQ for DMA "
					   "channel %d\n", chan->id);
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	/* The channel register blocks sit 0x80 bytes apart starting at
	 * offset 0x100, so recover the channel index from 'reg'.
	 */
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85XX channels get the 83XX callbacks too */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_queue);

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int __devinit fsldma_of_probe(struct of_device *op,
			const struct of_device_id *match)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hook up the handler yet */
	fdev->irq = irq_of_parse_and_map(op->node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
	fdev->common.dev = &op->dev;

	dev_set_drvdata(&op->dev, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hook up the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct of_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(fdev);

	return 0;
}

static struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct of_platform_driver fsldma_of_driver = {
	.name		= "fsl-elo-dma",
	.match_table	= fsldma_of_ids,
	.probe		= fsldma_of_probe,
	.remove		= fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	int ret;

	pr_info("Freescale Elo / Elo Plus DMA driver\n");

	ret = of_register_platform_driver(&fsldma_of_driver);
	if (ret)
		pr_err("fsldma: failed to register platform driver\n");

	return ret;
}

static void __exit fsldma_exit(void)
{
	of_unregister_platform_driver(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");