shdmac.c
/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>

#include "../dmaengine.h"
#include "shdma.h"

/* DMA register */
#define SAR	0x00
#define DAR	0x04
#define TCR	0x08
#define CHCR	0x0C
#define DMAOR	0x40

#define TEND	0x18 /* USB-DMAC */

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER 256
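/*
 * TCR limits a single hardware transfer to 16 MiB; the shdma core is told to
 * use (SH_DMA_TCR_MAX + 1) as the per-descriptor maximum (see
 * sh_dmae_chan_probe() and sh_dmae_desc_setup() below), so longer requests
 * are split across several descriptors.
 */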
#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     channel buffers
 * (3) one CHCLR per several channels - 1 has to be written to the bit,
 *     corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
		sh_dc->shdma_chan.id;
	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg);
}

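/*
 * DMAOR is a 16-bit register on most controllers, but some variants
 * (pdata->dmaor_is_32bit) only tolerate 32-bit accesses; these accessors
 * hide the difference.
 */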
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

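/*
 * The CHCR location within a channel's register block differs between DMAC
 * variants; shdev->chcr_offset is initialised in sh_dmae_probe() and defaults
 * to CHCR.
 */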
static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

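/*
 * The CHCR transfer-size (TS) field may be split into a low and a high part,
 * described by the pdata masks/shifts. The combined value is an index into
 * pdata->ts_shift[], which holds log2 of the transfer unit in bytes.
 */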
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

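/*
 * Start the channel: controllers that need it (e.g. the USB-DMAC, see the
 * TEND definition above) have TEND primed first, then DE and the
 * controller-specific interrupt-enable bit are set while TE is cleared.
 */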
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

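/*
 * Each 16-bit DMARS register packs two 8-bit MID/RID fields;
 * chan_pdata->dmars and dmars_bit select the register and byte for this
 * channel, and the read-modify-write below leaves the other field untouched.
 */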
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}

static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}

/*
 * Find a slave channel configuration from the controller list by either a slave
 * ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				sh_chan->shdma_chan.slave_id = i;
				return cfg;
			}
	}

	return NULL;
}

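/*
 * Bind a channel to a slave. When @try is set, only verify that a matching
 * slave configuration exists; the channel state is left untouched.
 */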
static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, dma_addr_t slave_addr, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENXIO;

	if (!try) {
		sh_chan->config = cfg;
		sh_chan->slave_addr = slave_addr ? : cfg->addr;
	}

	return 0;
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}

static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}

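/*
 * Residue helper: hw.tcr holds the byte length this descriptor was programmed
 * with, while the TCR register counts the remaining transfer units; shifting
 * left by xmit_shift converts units back to bytes, so the difference is the
 * number of bytes already transferred.
 */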
static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so we have to reset all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}
#endif

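/*
 * A descriptor is considered complete once the channel's current address has
 * advanced to the descriptor's end: DAR for device-to-memory transfers, SAR
 * otherwise.
 */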
static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
			       GFP_KERNEL);
	if (!sh_chan) {
		dev_err(sdev->dma_dev.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
	dma_dev->chancnt = 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

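/*
 * The controller may have lost its state while runtime-suspended, so
 * re-initialise DMAOR on resume.
 */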
static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,
};

static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation, may only be called after a
	 * successful slave configuration.
	 */
	return sh_chan->slave_addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};

static const struct of_device_id sh_dmae_of_match[] = {
	{.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
	{}
};
MODULE_DEVICE_TABLE(of, sh_dmae_of_match);

static int sh_dmae_probe(struct platform_device *pdev)
{
	const struct sh_dmae_pdata *pdata;
	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int chan_irq[SH_DMAE_MAX_CHANNELS];
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	unsigned long irqflags = 0;
	int errirq;
#endif
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	if (pdev->dev.of_node)
		pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
	else
		pdata = dev_get_platdata(&pdev->dev);

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
			     GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(shdev->chan_reg))
		return PTR_ERR(shdev->chan_reg);
	if (dmars) {
		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
		if (IS_ERR(shdev->dmars))
			return PTR_ERR(shdev->dmars);
	}

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Default transfer size of 4 bytes requires 4-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			      pdata->channel_num);
	if (err < 0)
		goto eshdma;

	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
			       "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = 0;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	synchronize_rcu();

	return err;
}

static int sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;

	dma_async_device_unregister(dma_dev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	synchronize_rcu();

	return 0;
}

static struct platform_driver sh_dmae_driver = {
	.driver 	= {
		.owner	= THIS_MODULE,
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
		.of_match_table = sh_dmae_of_match,
	},
	.remove		= sh_dmae_remove,
	.shutdown	= sh_dmae_shutdown,
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
951 952

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);