/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC does not have a hardware DMA chain mode.
 * - The maximum DMA transfer size is 16 MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>

#include "../dmaengine.h"
#include "shdma.h"

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER 256
#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/*
 * Different DMAC implementations provide different ways to clear DMA channels:
 * (1) none - no CHCLR registers are available
 * (2) one CHCLR register per channel - 0 has to be written to it to clear
 *     channel buffers
 * (3) one CHCLR register shared by several channels - 1 has to be written to
 *     the bit corresponding to the specific channel to reset it
 */
static void channel_clear(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
		sh_dc->shdma_chan.id;
	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;

	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg);
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg);
}

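/*
 * DMAOR is a 16-bit register on most DMACs; pdata->dmaor_is_32bit marks
 * controller variants where it is 32 bits wide.
 */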
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	void __iomem *addr = shdev->chan_reg + DMAOR;

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset);
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
175 176
}

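
/*
 * Decode the transfer size (TS) field of CHCR, which is split across low and
 * high bit ranges, into the log2 transfer-size shift used to scale TCR values.
 */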
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
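	/* TCR is programmed in transfer units rather than bytes, hence the shift */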
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

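	/* Enable the channel (DE) and its interrupt, clearing any stale transfer-end flag */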
	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* In the case of a missing DMARS resource, use the first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

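	/*
	 * The MID/RID value occupies one byte of a shared 16-bit DMARS
	 * register: update only the half selected by dmars_bit and preserve
	 * the other half.
	 */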
	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}

static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

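	/*
	 * A valid slave ID means a slave (peripheral) transfer: program DMARS
	 * and CHCR from the slave configuration. Otherwise fall back to the
	 * default memory-to-memory setup.
	 */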
	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}

/*
 * Find a slave channel configuration from the controller list by either a slave
 * ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				sh_chan->shdma_chan.slave_id = cfg->slave_id;
				return cfg;
			}
	}

	return NULL;
}

static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENXIO;

	if (!try)
		sh_chan->config = cfg;

	return 0;
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

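	/* Clear DE, TE and the interrupt enable bit to stop the channel */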
	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}

static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}

static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	/* hw.tcr counts bytes, whereas the TCR register counts transfer units */
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so reset them all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}

static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

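	/*
	 * The descriptor is complete once the channel's current address has
	 * advanced to the end of the transfer: check DAR for device-to-memory
	 * transfers, SAR otherwise.
	 */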
	return	(sdesc->direction == DMA_DEV_TO_MEM &&
		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
			       GFP_KERNEL);
	if (!sh_chan) {
		dev_err(sdev->dma_dev.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset;

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
	dma_dev->chancnt = 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

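		/* Restore each channel's configuration after the controller reset */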
		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,
};

static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation, may only be called after a
	 * successful slave configuration.
	 */
	return sh_chan->config->addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};

static int sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
			     GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(shdev->chan_reg))
		return PTR_ERR(shdev->chan_reg);
	if (dmars) {
		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
		if (IS_ERR(shdev->dmars))
			return PTR_ERR(shdev->dmars);
	}

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Default transfer size of 4 bytes requires 4-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			      pdata->channel_num);
	if (err < 0)
		goto eshdma;

	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
			       "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);
	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	synchronize_rcu();

	return err;
}

static int sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(dma_dev);

	if (errirq > 0)
		free_irq(errirq, shdev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	platform_set_drvdata(pdev, NULL);

	synchronize_rcu();

	return 0;
}

static const struct of_device_id sh_dmae_of_match[] = {
	{ .compatible = "renesas,shdma", },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_dmae_of_match);

static struct platform_driver sh_dmae_driver = {
	.driver 	= {
		.owner	= THIS_MODULE,
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
		.of_match_table = sh_dmae_of_match,
	},
	.remove		= sh_dmae_remove,
	.shutdown	= sh_dmae_shutdown,
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);
936 937

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);