/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS  16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST     (1 << 0)
#define IMX_DMA_ERR_REQUEST   (1 << 1)
#define IMX_DMA_ERR_TRANSFER  (1 << 2)
#define IMX_DMA_ERR_BUFFER    (1 << 3)
#define IMX_DMA_ERR_TIMEOUT   (1 << 4)

#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt status Register */
#define DMA_DIMR    0x08		/* Interrupt mask Register */
#define DMA_DBTOSR  0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR  0x10		/* Request timeout Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR  0x1c		/* Burst timeout control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel counter Registers */

#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)

enum  imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imx_dma_2d_config {
	u16		xsr;
	u16		ysr;
	u16		wsr;
	int		count;
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
};

static struct platform_device_id imx_dma_devtype[] = {
	{
		.name = "imx1-dma",
		.driver_data = IMX1_DMA,
	}, {
		.name = "imx21-dma",
		.driver_data = IMX21_DMA,
	}, {
		.name = "imx27-dma",
		.driver_data = IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);

static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx21_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX21_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}



static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

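/*
 * Hardware descriptor chaining (the CCR_RPT/CCR_ACRPT auto-repeat bits)
 * is only usable on i.MX27; on the other variants the next scatter-gather
 * segment is programmed from the interrupt handler instead.
 */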
static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	unsigned long now;

	now = min(d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		 imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));

	return now;
}

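/*
 * Acknowledge and unmask the channel interrupt, then set CCR_CEN to start
 * the transfer.  When hardware chaining is available the following
 * scatter-gather segment is pre-programmed with the repeat bits set.
 */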
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}

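/*
 * Error handling: read the burst timeout, request timeout, transfer error
 * and buffer overflow status registers, acknowledge the errors and
 * schedule the tasklet of each affected channel.
 */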
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}

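/*
 * Per-channel interrupt work: advance to the next scatter-gather segment
 * if one remains (re-arming the watchdog when hardware chaining is used),
 * otherwise stop the channel and let the tasklet complete the descriptor.
 */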
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;

	spin_lock(&imxdma->lock);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock(&imxdma->lock);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock(&imxdma->lock);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

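/*
 * Program the channel registers according to the descriptor type (memcpy,
 * interleaved 2D, slave_sg or cyclic) and start the transfer.  Interleaved
 * transfers first claim one of the two global 2D configuration slots.
 */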
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		spin_lock_irqsave(&imxdma->lock, flags);
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0) {
			spin_unlock_irqrestore(&imxdma->lock, flags);
			return -EBUSY;
		}

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;
		spin_unlock_irqrestore(&imxdma->lock, flags);

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
			 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
			"dma_length=%d\n", __func__, imxdmac->channel,
			d->dest, d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (dev2mem)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (mem2dev)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

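/*
 * Completion tasklet: run the client callback, retire the finished
 * descriptor (cyclic descriptors stay on ld_active), release the 2D slot
 * if one was used and start the next queued descriptor.
 */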
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdma->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and don't mark the descriptor as complete.
	 * Only in non-cyclic cases would it be marked as complete.
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdma->lock);
}

static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdma->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}

		imxdmac->hw_chaining = 0;

		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
			CCR_REN;
		imxdmac->ccr_to_device =
			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
		imx_dmav1_writel(imxdma, imxdmac->dma_request,
				 DMA_RSSR(imxdmac->channel));

		/* Set burst length */
		imx_dmav1_writel(imxdma, imxdmac->watermark_level *
				imxdmac->word_size, DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

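/*
 * Memory-to-memory copy descriptor: both source and destination are set
 * up for 32-bit linear accesses.
 */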
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
			__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

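/*
 * Interleaved (2D) transfer: only memory-to-memory transfers with a
 * single chunk per frame (frame_size == 1) can be mapped onto the i.MX
 * 2D X/Y/W size registers.
 */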
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
		"   src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
		imxdmac->channel, xt->src_start, xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	struct resource *res;
	int ret, i;
	int irq, irq_err;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->devtype = pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imxdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	clk_prepare_enable(imxdma->dma_ipg);
	clk_prepare_enable(imxdma->dma_ahb);

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto err;
		}

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto err;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto err;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto err;
			}
			init_timer(&imxdmac->watchdog);
			imxdmac->watchdog.function = &imxdma_watchdog;
			imxdmac->watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err;
	}

	return 0;

err:
	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
	return ret;
}

static int imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	dma_async_device_unregister(&imxdma->dma_device);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.id_table	= imx_dma_devtype,
	.remove		= imxdma_remove,
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");