/*
 * Renesas SuperH DMA Engine support
 *
 * based on drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC does not have a hardware DMA chain mode.
 * - The maximum DMA transfer size is 16 MB.
 *
 */

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rculist.h>
#include <linux/sh_dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "shdma.h"

/* DMA registers */
#define SAR	0x00	/* Source Address Register */
#define DAR	0x04	/* Destination Address Register */
#define TCR	0x08	/* Transfer Count Register */
#define CHCR	0x0C	/* Channel Control Register */
#define DMAOR	0x40	/* DMA Operation Register */

#define TEND	0x18 /* USB-DMAC */

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER 256
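/*
 * TCR is programmed in transfer units (see xmit_shift); the per-descriptor
 * byte limit used below (max_xfer_len) is SH_DMA_TCR_MAX + 1 = 16 MiB.
 */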
#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     channel buffers
 * (3) one CHCLR register shared by several channels - 1 has to be written to
 *     the bit corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
		sh_dc->shdma_chan.id;
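	/* write 0 for variant (2) above, or this channel's bit for variant (3) */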
	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg);
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

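/*
 * calc_xmit_shift() extracts the TS (transfer size) index from CHCR and
 * returns the log2 of the transfer unit in bytes; log2size_to_chcr() does the
 * reverse.  For example, with a ts_shift table of { 2, 3, 4 } (illustrative
 * values only - the real table and TS bit layout are SoC-specific platform
 * data), a TS index of 1 would select 2^3 = 8-byte transfer units.
 */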
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 */
	u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* CHCR cannot be changed while DMA is active. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* If the DMARS resource is missing, use the first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

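	/*
	 * Each 16-bit DMARS register typically packs the 8-bit MID/RID values
	 * of two channels (dmars_bit selects the byte, usually 0 or 8); the
	 * read-modify-write below preserves the neighbouring channel's value.
	 */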
	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}

static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}

/*
 * Find a slave channel configuration from the controller list by either a slave
 * ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				sh_chan->shdma_chan.slave_id = i;
				return cfg;
			}
	}

	return NULL;
}

static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, dma_addr_t slave_addr, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENXIO;

	if (!try) {
		sh_chan->config = cfg;
		sh_chan->slave_addr = slave_addr ? : cfg->addr;
	}

	return 0;
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}

static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}

static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so reset them all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}
#endif

static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

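	/*
	 * The descriptor has completed once the channel's current address
	 * (DAR for dev-to-mem, SAR otherwise) has reached the descriptor's
	 * end address.
	 */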
	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
			       GFP_KERNEL);
	if (!sh_chan) {
		dev_err(sdev->dma_dev.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

static const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,
};

static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation; it may only be called
	 * after a successful slave configuration.
	 */
	return sh_chan->slave_addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};

static const struct of_device_id sh_dmae_of_match[] = {
	{.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
	{}
};
MODULE_DEVICE_TABLE(of, sh_dmae_of_match);

static int sh_dmae_probe(struct platform_device *pdev)
{
	const struct sh_dmae_pdata *pdata;
	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int chan_irq[SH_DMAE_MAX_CHANNELS];
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	unsigned long irqflags = 0;
	int errirq;
#endif
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	if (pdev->dev.of_node)
		pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
	else
		pdata = dev_get_platdata(&pdev->dev);

	/* platform data is mandatory */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
			     GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(shdev->chan_reg))
		return PTR_ERR(shdev->chan_reg);
	if (dmars) {
		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
		if (IS_ERR(shdev->dmars))
			return PTR_ERR(shdev->dmars);
	}

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Default transfer size of 4 bytes requires 4-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			      pdata->channel_num);
	if (err < 0)
		goto eshdma;

	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
			       "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = 0;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	synchronize_rcu();

	return err;
}

static int sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;

	dma_async_device_unregister(dma_dev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	synchronize_rcu();

	return 0;
}

static struct platform_driver sh_dmae_driver = {
	.driver 	= {
		.owner	= THIS_MODULE,
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
		.of_match_table = sh_dmae_of_match,
	},
	.remove		= sh_dmae_remove,
	.shutdown	= sh_dmae_shutdown,
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);