/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS  16

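/*
 * The controller has two shared 2D addressing register sets (the W/X/Y-Size
 * Registers A and B below); CCR_MSEL_B selects set B for a channel, so at
 * most two distinct 2D configurations can be in use at any time.
 */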
#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST     (1 << 0)
#define IMX_DMA_ERR_REQUEST   (1 << 1)
#define IMX_DMA_ERR_TRANSFER  (1 << 2)
#define IMX_DMA_ERR_BUFFER    (1 << 3)
#define IMX_DMA_ERR_TIMEOUT   (1 << 4)

#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt status Register */
#define DMA_DIMR    0x08		/* Interrupt mask Register */
#define DMA_DBTOSR  0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR  0x10		/* Request timeout Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR  0x1c		/* Burst timeout control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel counter Registers */
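
/*
 * Per-channel registers repeat at a 0x40 byte stride, hence the "(x) << 6"
 * above; e.g. DMA_CCR(2) = 0x8c + (2 << 6) = 0x10c.
 */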

#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)

enum  imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imx_dma_2d_config {
	u16		xsr;
	u16		ysr;
	u16		wsr;
	int		count;
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
};

struct imxdma_filter_data {
	struct imxdma_engine	*imxdma;
	int			 request;
};

static struct platform_device_id imx_dma_devtype[] = {
	{
		.name = "imx1-dma",
		.driver_data = IMX1_DMA,
	}, {
		.name = "imx21-dma",
		.driver_data = IMX21_DMA,
	}, {
		.name = "imx27-dma",
		.driver_data = IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);

static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma",
		.data = &imx_dma_devtype[IMX1_DMA],
	}, {
		.compatible = "fsl,imx21-dma",
		.data = &imx_dma_devtype[IMX21_DMA],
	}, {
		.compatible = "fsl,imx27-dma",
		.data = &imx_dma_devtype[IMX27_DMA],
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);

static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx21_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX21_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	unsigned long now;

	now = min(d->len, sg_dma_len(sg));
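	/* Cyclic descriptors use len == IMX_DMA_LENGTH_LOOP, so their
	 * remaining byte count is never decremented. */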
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		 imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));

	return now;
}

static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}

static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
			 errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}

static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
			 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and don't mark the descriptor as complete.
	 * Only in the non-cyclic cases is it marked as complete.
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

}

static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdma->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}

		imxdmac->hw_chaining = 0;

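		/*
		 * In CCRn the destination-side SIZ/MOD fields sit two bits
		 * above their source-side counterparts (CCR_SSIZ_x vs
		 * CCR_DSIZ_x, CCR_SMOD_x vs CCR_DMOD_x), so a source-side
		 * encoding shifted left by 2 yields the destination encoding.
		 */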
		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
			CCR_REN;
		imxdmac->ccr_to_device =
			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
		imx_dmav1_writel(imxdma, imxdmac->dma_request,
				 DMA_RSSR(imxdmac->channel));

		/* Set burst length */
		imx_dmav1_writel(imxdma, imxdmac->watermark_level *
				imxdmac->word_size, DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
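
/*
 * Illustrative client-side sketch (not part of this driver): a slave channel
 * is configured and an sg transfer prepared through the generic dmaengine
 * wrappers. The device address and callback names are hypothetical.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst	= 8,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				      DMA_PREP_INTERRUPT);
 *	txd->callback = my_done_callback;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */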

static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
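
/*
 * Illustrative sketch of a cyclic client (audio-style), using the generic
 * dmaengine wrapper; buffer and period sizes are hypothetical:
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					DMA_DEV_TO_MEM, 0);
 *	txd->callback = my_period_elapsed;	// invoked once per period
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 * Because desc->len is IMX_DMA_LENGTH_LOOP and the sg list above loops back
 * on itself, the transfer runs until DMA_TERMINATE_ALL is issued.
 */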

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
		"   src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, (unsigned long long)xt->src_start,
		(unsigned long long) xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}
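
/*
 * Illustrative sketch (hypothetical sizes): a single-frame 2D copy maps onto
 * this callback with sgl[0].size as the row width (x), numf as the row count
 * (y) and size + icg as the line stride (w):
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *txd;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
 *	xt->src_start	= src_dma;
 *	xt->dst_start	= dst_dma;
 *	xt->dir		= DMA_MEM_TO_MEM;
 *	xt->numf	= 64;			// rows
 *	xt->frame_size	= 1;
 *	xt->sgl[0].size	= 256;			// bytes per row
 *	xt->sgl[0].icg	= 768;			// stride = 256 + 768
 *	xt->dst_sgl	= true;			// destination is 2D
 *	txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 */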

static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct imxdma_filter_data *fdata = param;
	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);

	if (chan->device->dev != fdata->imxdma->dev)
		return false;

	imxdma_chan->dma_request = fdata->request;
	chan->private = NULL;

	return true;
}

static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct imxdma_engine *imxdma = ofdma->of_dma_data;
	struct imxdma_filter_data fdata = {
		.imxdma = imxdma,
	};

	if (count != 1)
		return NULL;

	fdata.request = dma_spec->args[0];

	return dma_request_channel(imxdma->dma_device.cap_mask,
					imxdma_filter_fn, &fdata);
}
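
/*
 * With #dma-cells = <1>, the single specifier cell above selects the DMA
 * request line; a hypothetical client node would look like:
 *
 *	uart1: serial@10004000 {
 *		...
 *		dmas = <&dma 7>, <&dma 8>;
 *		dma-names = "rx", "tx";
 *	};
 */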

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	struct resource *res;
	const struct of_device_id *of_id;
	int ret, i;
	int irq, irq_err;

	of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imxdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	clk_prepare_enable(imxdma->dma_ipg);
	clk_prepare_enable(imxdma->dma_ahb);

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto err;
		}

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto err;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto err;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto err;
			}
			init_timer(&imxdmac->watchdog);
			imxdmac->watchdog.function = &imxdma_watchdog;
			imxdmac->watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
				imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
err:
	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
	return ret;
}

static int imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.id_table	= imx_dma_devtype,
	.remove		= imxdma_remove,
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");