/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>

#include <asm/irq.h>
#include <mach/dma.h>
#include <mach/hardware.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS  16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST     (1 << 0)
#define IMX_DMA_ERR_REQUEST   (1 << 1)
#define IMX_DMA_ERR_TRANSFER  (1 << 2)
#define IMX_DMA_ERR_BUFFER    (1 << 3)
#define IMX_DMA_ERR_TIMEOUT   (1 << 4)

#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt status Register */
#define DMA_DIMR    0x08		/* Interrupt mask Register */
#define DMA_DBTOSR  0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR  0x10		/* Request timeout Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR  0x1c		/* Burst timeout control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel counter Registers */

#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)

enum  imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imx_dma_2d_config {
	u16		xsr;
	u16		ysr;
	u16		wsr;
	int		count;
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
};

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
};

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	if (cpu_is_mx27())
		return imxdmac->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	unsigned long now;

	now = min(d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		 imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));

	return now;
}

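/*
 * imxdma_enable_hw - start the transfer that was programmed into the channel:
 * ack and unmask the channel interrupt and set CCR_CEN.  When hardware
 * chaining is available the next scatterlist entry is pre-programmed with
 * the repeat/auto-clear-repeat bits set.
 */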
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if ((cpu_is_mx21() || cpu_is_mx27()) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}

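/*
 * imxdma_err_handler - collect the burst timeout, request timeout, transfer
 * error and buffer overflow status bits, ack them per channel and defer the
 * actual error handling to the channel tasklet.
 */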
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}

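/*
 * dma_irq_handle_channel - per-channel completion handling: either advance
 * the active descriptor to its next scatterlist entry or stop the channel
 * and let the tasklet complete the descriptor.
 */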
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;

	spin_lock(&imxdma->lock);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock(&imxdma->lock);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock(&imxdma->lock);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (cpu_is_mx21() || cpu_is_mx27())
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

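/*
 * imxdma_xfer_desc - program the channel registers for a descriptor and
 * enable the channel.  Interleaved transfers first claim one of the two
 * shared 2D slots (A or B) and set up its W/X/Y size registers.
 */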
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		spin_lock_irqsave(&imxdma->lock, flags);
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0) {
			spin_unlock_irqrestore(&imxdma->lock, flags);
			return -EBUSY;
		}

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;
		spin_unlock_irqrestore(&imxdma->lock, flags);

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
			 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
			"dma_length=%d\n", __func__, imxdmac->channel,
			d->dest, d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (dev2mem)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (mem2dev)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

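/*
 * imxdma_tasklet - bottom half: run the client callback, complete the cookie
 * of non-cyclic descriptors, release the 2D slot of interleaved transfers
 * and start the next queued descriptor, if any.
 */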
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdma->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and don't mark the descriptor as complete.
	 * Only in non-cyclic cases is it marked as complete.
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdma->lock);
}

static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdma->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}

		imxdmac->hw_chaining = 1;
		if (!imxdma_hw_chain(imxdmac))
			return -EINVAL;
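		/*
		 * Build the two CCR halves: per the CCR_* defines above, the
		 * source size/mode fields sit in bits 5:4 and 11:10, and the
		 * destination fields use the same encoding shifted left by
		 * two bits, hence the << 2 on the memory-side settings.
		 */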
		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
			CCR_REN;
		imxdmac->ccr_to_device =
			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
		imx_dmav1_writel(imxdma, imxdmac->dma_request,
				 DMA_RSSR(imxdmac->channel));

		/* Set burst length */
		imx_dmav1_writel(imxdma, imxdmac->watermark_level *
				imxdmac->word_size, DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
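
/*
 * imxdma_tx_submit - move the prepared descriptor from ld_free to ld_queue
 * and assign its cookie; the transfer itself is started by issue_pending or
 * by the tasklet once the previous descriptor completes.
 */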
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
			__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
		"   src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
		imxdmac->channel, xt->src_start, xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
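
/*
 * imxdma_issue_pending - if the channel is idle, program the hardware for
 * the first queued descriptor and move it to ld_active.
 */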
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	if (cpu_is_mx1()) {
		imxdma->base = MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR);
	} else if (cpu_is_mx21()) {
		imxdma->base = MX21_IO_ADDRESS(MX21_DMA_BASE_ADDR);
	} else if (cpu_is_mx27()) {
		imxdma->base = MX27_IO_ADDRESS(MX27_DMA_BASE_ADDR);
	} else {
		kfree(imxdma);
		return 0;
	}

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg)) {
		ret = PTR_ERR(imxdma->dma_ipg);
		goto err_clk;
	}

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb)) {
		ret = PTR_ERR(imxdma->dma_ahb);
		goto err_clk;
	}

	clk_prepare_enable(imxdma->dma_ipg);
	clk_prepare_enable(imxdma->dma_ahb);

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (cpu_is_mx1()) {
		ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto err_enable;
		}

		ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			free_irq(MX1_DMA_INT, NULL);
			goto err_enable;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (cpu_is_mx21() || cpu_is_mx27()) {
			ret = request_irq(MX2x_INT_DMACH0 + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 MX2x_INT_DMACH0 + i, i);
				goto err_init;
			}
			init_timer(&imxdmac->watchdog);
			imxdmac->watchdog.function = &imxdma_watchdog;
			imxdmac->watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:

	if (cpu_is_mx21() || cpu_is_mx27()) {
		while (--i >= 0)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}
err_enable:
	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
err_clk:
	kfree(imxdma);
	return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	if (cpu_is_mx21() || cpu_is_mx27()) {
		for (i = 0; i < IMX_DMA_CHANNELS; i++)
			free_irq(MX2x_INT_DMACH0 + i, NULL);
	} else if (cpu_is_mx1()) {
		free_irq(MX1_DMA_INT, NULL);
		free_irq(MX1_DMA_ERR, NULL);
	}

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
	kfree(imxdma);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");