/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented behaviour),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/hardware/pl080.h>

#include "dmaengine.h"

#define DRIVER_NAME	"pl08xdmac"
static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether the channels have Nomadik security extension bits
 *	that need to be checked for permission before use and some registers are
 *	missing
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
	bool nomadik;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};
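
/*
 * Illustrative sketch (not from the original source): chaining two LLIs,
 * with bit[0] of the next pointer selecting the AHB master used for LLI
 * fetches, as pl08x_fill_lli_for_desc() does below:
 *
 *	lli[0].lli = llis_bus + 1 * sizeof(struct pl08x_lli);
 *	lli[0].lli |= PL080_LLI_LM_AHB2;	// fetch LLIs over AHB2
 *	lli[1].lli = 0;				// 0 terminates the chain
 */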

/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @base: memory base address for this physical channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: the virtual channel currently being served by this physical
 * channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 * world
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	spinlock_t lock;
	struct pl08x_dma_chan *serving;
	bool locked;
};

/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @tx: async tx descriptor
 * @node: node for txd list for channels
 * @dsg_list: list of children sg's
 * @direction: direction of transfer
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 */
struct pl08x_txd {
	struct dma_async_tx_descriptor tx;
	struct list_head node;
	struct list_head dsg_list;
	enum dma_transfer_direction direction;
	dma_addr_t llis_bus;
	struct pl08x_lli *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd.  Other registers are in llis_va[0].
	 */
	u32 ccfg;
};

/**
 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @chan: wrapped abstract channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @phychan_hold: if non-zero, hold on to the physical channel even if we
 * have no pending entries
 * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
 * @name: name of channel
 * @cd: channel platform data
 * @cfg: slave configuration, including the RX/TX addresses set at runtime
 * @pend_list: queued transactions pending on this channel
 * @at: active transaction on this channel
 * @lock: a lock for this channel data
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @waiting: a TX descriptor on this channel which is waiting for a physical
 * channel to become available
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 */
struct pl08x_dma_chan {
	struct dma_chan chan;
	struct pl08x_phy_chan *phychan;
	int phychan_hold;
	struct tasklet_struct tasklet;
	const char *name;
	const struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	struct list_head pend_list;
	struct pl08x_txd *at;
	spinlock_t lock;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	struct pl08x_txd *waiting;
	int signal;
	unsigned mux_use;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	int pool_ctr;
	u8 lli_buses;
	u8 mem_buses;
};

/*
 * PL08X specific defines
 */

/* Size (bytes) of each LLI buffer allocated for one transfer */
#define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN		8

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, tx);
}

/*
 * Mux handling.
 *
 * This gives us the DMA request input to the PL08x primecell which the
 * peripheral described by the channel data will be routed to, possibly
 * via a board/SoC specific external MUX.  One important point to note
 * here is that this does not depend on the physical channel.
 */
static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;
	int ret;

	if (plchan->mux_use++ == 0 && pd->get_signal) {
		ret = pd->get_signal(plchan->cd);
		if (ret < 0) {
			plchan->mux_use = 0;
			return ret;
		}

		plchan->signal = ret;
	}
	return 0;
}

static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;

	if (plchan->signal >= 0) {
		WARN_ON(plchan->mux_use == 0);

		if (--plchan->mux_use == 0 && pd->put_signal) {
			pd->put_signal(plchan->cd, plchan->signal);
			plchan->signal = -1;
		}
	}
}
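
/*
 * Illustrative sketch (assumed board code, not part of this driver): a
 * platform supplies the get_signal()/put_signal() hooks used above to
 * claim and release a DMA request line for a channel. The request line
 * number is hypothetical.
 *
 *	static int board_get_signal(const struct pl08x_channel_data *cd)
 *	{
 *		// route/claim a request line for this peripheral and
 *		// return its index, or a negative errno on failure
 *		return 3;
 *	}
 *
 *	static void board_put_signal(const struct pl08x_channel_data *cd,
 *				     int signal)
 *	{
 *		// undo any external mux routing set up in get_signal()
 *	}
 */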

/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values i.e. those for the first LLI
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
	struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct pl08x_lli *lli = &txd->llis_va[0];
	u32 val;

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	dev_vdbg(&pl08x->adev->dev,
		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
		txd->ccfg);

	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
	writel(lli->lli, phychan->base + PL080_CH_LLI);
	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}

/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}

/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val = readl(ch->base + PL080_CH_CONFIG);

	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
	         PL080_CONFIG_TC_IRQ_MASK);

	writel(val, ch->base + PL080_CH_CONFIG);

	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}

/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	unsigned long flags;
	size_t bytes = 0;

	spin_lock_irqsave(&plchan->lock, flags);
	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/* An LLI pointer of 0 terminates the LLI list */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	/* Sum up all queued transactions */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *txdi;
		list_for_each_entry(txdi, &plchan->pend_list, node) {
			struct pl08x_sg *dsg;
			list_for_each_entry(dsg, &txdi->dsg_list, node)
				bytes += dsg->len;
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->locked && !ch->serving) {
			ch->serving = virt_chan;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}

static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
					 struct pl08x_phy_chan *ch)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);

	/* Stop the channel and clear its interrupts */
	pl08x_terminate_phy_chan(pl08x, ch);

	/* Mark it as free */
	ch->serving = NULL;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * LLI handling
 */

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}
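
/*
 * Illustrative only: pl08x_cctl_bits(cctl, 4, 2, 8) encodes a 32-bit
 * source width, a 16-bit destination width and a transfer size of 8
 * (in source-width units, i.e. 8 * 4 = 32 bytes) into the control word.
 */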

struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};

/*
 * Autoselect a master bus to use for the transfer.  The slave bus is
 * chosen as the victim in case src & dest are not similarly aligned:
 * if, after aligning the master's address to the transfer width (by
 * sending a few bytes one at a time), the slave is still not aligned,
 * its width will be reduced to one byte.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else {
		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		}
	}
}

/*
 * Fills in one LLI for a certain transfer descriptor and advances the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
	int num_llis, int len, u32 cctl)
{
	struct pl08x_lli *llis_va = bd->txd->llis_va;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = bd->srcbus.addr;
	llis_va[num_llis].dst = bd->dstbus.addr;
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
		sizeof(struct pl08x_lli);
	llis_va[num_llis].lli |= bd->lli_bus;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}

static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
		u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
{
	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
	pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
	(*total_bytes) += len;
}

/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
			      struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl, early_bytes = 0;
	size_t max_bytes_per_lli, total_bytes;
	struct pl08x_lli *llis_va;
	struct pl08x_sg *dsg;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	pl08x->pool_ctr++;

	bd.txd = txd;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
	cctl = txd->cctl;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
				       PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
				       PL080_CONTROL_DWIDTH_SHIFT);

	list_for_each_entry(dsg, &txd->dsg_list, node) {
		total_bytes = 0;
		cctl = txd->cctl;

		bd.srcbus.addr = dsg->src_addr;
		bd.dstbus.addr = dsg->dst_addr;
		bd.remainder = dsg->len;
		bd.srcbus.buswidth = bd.srcbus.maxwidth;
		bd.dstbus.buswidth = bd.dstbus.maxwidth;

		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

		dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
			bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
			bd.srcbus.buswidth,
			bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
			bd.dstbus.buswidth,
			bd.remainder);
		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
			mbus == &bd.srcbus ? "src" : "dst",
			sbus == &bd.srcbus ? "src" : "dst");

		/*
		 * Zero length is only allowed if all these requirements are
		 * met:
		 * - flow controller is peripheral.
		 * - src.addr is aligned to src.width
		 * - dst.addr is aligned to dst.width
		 *
		 * sg_len == 1 should be true, as there can be two cases here:
		 *
		 * - Memory addresses are contiguous and are not scattered.
		 *   Here, only one sg will be passed by the client driver,
		 *   with the memory address and zero length. We pass this to
		 *   the controller, and after the transfer it will receive
		 *   the last burst request from the peripheral and so the
		 *   transfer finishes.
		 *
		 * - Memory addresses are scattered and are not contiguous.
		 *   Here, since the DMA controller doesn't know when an LLI's
		 *   transfer is over, it can't load the next LLI. So in this
		 *   case there has to be an assumption that only one LLI is
		 *   supported. Thus, we can't have scattered addresses.
		 */
		if (!bd.remainder) {
			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
				PL080_CONFIG_FLOW_CONTROL_SHIFT;
			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
					(fc <= PL080_FLOW_SRC2DST_SRC))) {
				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
					__func__);
				return 0;
			}

			if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
					(bd.dstbus.addr % bd.dstbus.buswidth)) {
				dev_err(&pl08x->adev->dev,
					"%s src & dst address must be aligned to src"
					" & dst width if peripheral is flow controller",
					__func__);
				return 0;
			}

			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, 0);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
			break;
		}

		/*
		 * Send byte by byte for the following cases
		 * - Less than a bus width available
		 * - until master bus is aligned
		 */
		if (bd.remainder < mbus->buswidth)
			early_bytes = bd.remainder;
		else if ((mbus->addr) % (mbus->buswidth)) {
			early_bytes = mbus->buswidth - (mbus->addr) %
				(mbus->buswidth);
			if ((bd.remainder - early_bytes) < mbus->buswidth)
				early_bytes = bd.remainder;
		}

		if (early_bytes) {
			dev_vdbg(&pl08x->adev->dev,
				"%s byte width LLIs (remain 0x%08x)\n",
				__func__, bd.remainder);
			prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
				&total_bytes);
		}

		if (bd.remainder) {
			/*
			 * Master now aligned
			 * - if slave is not then we must set its width down
			 */
			if (sbus->addr % sbus->buswidth) {
				dev_dbg(&pl08x->adev->dev,
					"%s set down bus width to one byte\n",
					__func__);

				sbus->buswidth = 1;
			}

			/*
			 * Bytes transferred = tsize * src width, not
			 * MIN(buswidths)
			 */
			max_bytes_per_lli = bd.srcbus.buswidth *
				PL080_CONTROL_TRANSFER_SIZE_MASK;
			dev_vdbg(&pl08x->adev->dev,
				"%s max bytes per lli = %zu\n",
				__func__, max_bytes_per_lli);

			/*
			 * Make largest possible LLIs until less than one bus
			 * width left
			 */
			while (bd.remainder > (mbus->buswidth - 1)) {
				size_t lli_len, tsize, width;

				/*
				 * If enough left try to send max possible,
				 * otherwise try to send the remainder
				 */
				lli_len = min(bd.remainder, max_bytes_per_lli);

				/*
				 * Check against maximum bus alignment:
				 * calculate the actual transfer size in
				 * relation to bus width and get a maximum
				 * remainder of the highest bus width - 1
				 */
				width = max(mbus->buswidth, sbus->buswidth);
				lli_len = (lli_len / width) * width;
				tsize = lli_len / bd.srcbus.buswidth;

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of "
					"size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);

				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, tsize);
				pl08x_fill_lli_for_desc(&bd, num_llis++,
						lli_len, cctl);
				total_bytes += lli_len;
			}

			/*
			 * Send any odd bytes
			 */
			if (bd.remainder) {
				dev_vdbg(&pl08x->adev->dev,
					"%s align with boundary, send odd bytes (remain %zu)\n",
					__func__, bd.remainder);
				prep_byte_width_lli(&bd, &cctl, bd.remainder,
						num_llis++, &total_bytes);
			}
		}

		if (total_bytes != dsg->len) {
			dev_err(&pl08x->adev->dev,
				"%s size of encoded LLIs doesn't match total txd, transferred 0x%08zx from size 0x%08zx\n",
				__func__, total_bytes, dsg->len);
			return 0;
		}

		if (num_llis >= MAX_NUM_TSFR_LLIS) {
			dev_err(&pl08x->adev->dev,
				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
				__func__, (u32) MAX_NUM_TSFR_LLIS);
			return 0;
		}
	}

	llis_va = txd->llis_va;
	/* The final LLI terminates the LLI chain */
	llis_va[num_llis - 1].lli = 0;
	/* The final LLI element shall also fire an interrupt */
	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
	{
		int i;

		dev_vdbg(&pl08x->adev->dev,
			 "%-3s %-9s  %-10s %-10s %-10s %s\n",
			 "lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				 i, &llis_va[i], llis_va[i].src,
				 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
				);
		}
	}
#endif

	return num_llis;
}
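
/*
 * Worked example (illustrative only): copying 0x2003 bytes with both
 * buses 4 bytes wide and 4-byte aligned addresses yields one LLI of
 * 0x2000 bytes (tsize 0x800), then a single byte-width LLI transferring
 * the 3 odd trailing bytes.
 */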

/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	struct pl08x_sg *dsg, *_dsg;

	/* Free the LLI */
	if (txd->llis_va)
		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	pl08x->pool_ctr--;

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *next;

	if (!list_empty(&plchan->pend_list)) {
		list_for_each_entry_safe(txdi,
					 next, &plchan->pend_list, node) {
			list_del(&txdi->node);
			pl08x_free_txd(pl08x, txdi);
		}
	}
}

/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}

/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
			    struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;
	int ret;

	/* Check if we already have a channel */
	if (plchan->phychan) {
		ch = plchan->phychan;
		goto got_channel;
	}

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		/* No physical channel available, cope with it */
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		return -EBUSY;
	}

	/*
	 * OK we have a physical channel: for memcpy() this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if (plchan->slave) {
		ret = pl08x_request_mux(plchan);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
	}

	plchan->phychan = ch;
	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		 ch->id,
		 plchan->signal,
		 plchan->name);

got_channel:
	/* Assign the flow control signal to this channel */
	if (txd->direction == DMA_MEM_TO_DEV)
		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else if (txd->direction == DMA_DEV_TO_MEM)
		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	plchan->phychan_hold++;

	return 0;
}

static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	pl08x_release_mux(plchan);
	pl08x_put_phy_channel(pl08x, plchan->phychan);
	plchan->phychan = NULL;
}

static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
	struct pl08x_txd *txd = to_pl08x_txd(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&plchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	/* Put this onto the pending list */
	list_add_tail(&txd->node, &plchan->pend_list);

	/*
	 * If there was no physical channel available for this memcpy,
	 * stack the request up and indicate that the channel is waiting
	 * for a free physical channel.
	 */
	if (!plchan->slave && !plchan->phychan) {
		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else {
		plchan->phychan_hold--;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return cookie;
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	/*
	 * This cookie not complete yet
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, pl08x_getbytes_chan(plchan));

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
}
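
/*
 * Illustrative client-side sketch (not part of this file): polling a
 * cookie for completion and reading back the residue. 'chan' and
 * 'cookie' are assumed to come from a prior prep/submit.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_info("residue: %u bytes\n", state.residue);
 */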

/* PrimeCell DMA extension */
struct burst_table {
	u32 burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = PL080_BSIZE_256,
	},
	{
		.burstwords = 128,
		.reg = PL080_BSIZE_128,
	},
	{
		.burstwords = 64,
		.reg = PL080_BSIZE_64,
	},
	{
		.burstwords = 32,
		.reg = PL080_BSIZE_32,
	},
	{
		.burstwords = 16,
		.reg = PL080_BSIZE_16,
	},
	{
		.burstwords = 8,
		.reg = PL080_BSIZE_8,
	},
	{
		.burstwords = 4,
		.reg = PL080_BSIZE_4,
	},
	{
		.burstwords = 0,
		.reg = PL080_BSIZE_1,
	},
};

/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}
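
/*
 * Illustrative only: with src = PL08X_AHB1 | PL08X_AHB2 and
 * dst = PL08X_AHB2, pl08x_select_bus() routes the destination to AHB2
 * while the source stays on AHB1, keeping the two ports separate.
 */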

static u32 pl08x_cctl(u32 cctl)
{
	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
		  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	return cctl | PL080_CONTROL_PROT_SYS;
}

static u32 pl08x_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return PL080_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return PL080_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return PL080_WIDTH_32BIT;
	default:
		return ~0;
	}
}

static u32 pl08x_burst(u32 maxburst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;

	return burst_sizes[i].reg;
}
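
/*
 * Illustrative only: the table above is scanned first-fit, so e.g.
 * pl08x_burst(15) settles on the 8-word entry and returns PL080_BSIZE_8,
 * the largest burst not exceeding the requested maxburst.
 */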

static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
	enum dma_slave_buswidth addr_width, u32 maxburst)
{
	u32 width, burst, cctl = 0;

	width = pl08x_width(addr_width);
	if (width == ~0)
		return ~0;

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	return pl08x_cctl(cctl);
}

static int dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);

	if (!plchan->slave)
		return -EINVAL;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	plchan->cfg = *config;

	return 0;
}
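
/*
 * Illustrative client-side sketch (assumed peripheral driver code, not
 * part of this file): configuring a TX slave channel before preparing a
 * transfer. The FIFO address and burst size are hypothetical.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = 0x80100000,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 16,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */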

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);
	/* Something is already active, or we're waiting for a channel... */
	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Take the first element in the queue and execute it */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);
		plchan->state = PL08X_CHAN_RUNNING;

		pl08x_start_txd(plchan, next);
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}

static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
					struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int num_llis, ret;

	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
	if (!num_llis) {
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);
		return -EINVAL;
	}

	spin_lock_irqsave(&plchan->lock, flags);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel was available.
		 *
		 * memcpy transfers can be sorted out at submission time.
		 *
		 * Slave transfers may have been denied due to platform
		 * channel muxing restrictions.  Since there is no guarantee
		 * that this will ever be resolved, and the signal must be
		 * acquired AFTER acquiring the physical channel, we will let
		 * them be NACK:ed with -EBUSY here. The drivers can retry
		 * the prep() call if they are eager on doing this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			pl08x_free_txd(pl08x, txd);
			spin_unlock_irqrestore(&plchan->lock, flags);
			return -EBUSY;
		}
	} else
		/*
		 * Else we're all set, paused and ready to roll, status
		 * will switch to PL08X_CHAN_RUNNING when we call
		 * issue_pending(). If there is something running on the
		 * channel already we don't change its state.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->lock, flags);

	return 0;
}

static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
	unsigned long flags)
{
	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
		txd->tx.flags = flags;
		txd->tx.tx_submit = pl08x_tx_submit;
		INIT_LIST_HEAD(&txd->node);
		INIT_LIST_HEAD(&txd->dsg_list);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	int ret;

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
				__func__);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	txd->direction = DMA_MEM_TO_MEM;
	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
					      pl08x->mem_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}

static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	struct scatterlist *sg;
	enum dma_slave_buswidth addr_width;
	dma_addr_t slave_addr;
	int ret, tmp;
	u8 src_buses, dst_buses;
	u32 maxburst, cctl;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sg_dma_len(sgl), plchan->name);

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;

	if (direction == DMA_MEM_TO_DEV) {
		cctl = PL080_CONTROL_SRC_INCR;
		slave_addr = plchan->cfg.dst_addr;
		addr_width = plchan->cfg.dst_addr_width;
		maxburst = plchan->cfg.dst_maxburst;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_DEV_TO_MEM) {
		cctl = PL080_CONTROL_DST_INCR;
		slave_addr = plchan->cfg.src_addr;
		addr_width = plchan->cfg.src_addr_width;
		maxburst = plchan->cfg.src_maxburst;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
	if (cctl == ~0) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"DMA slave configuration botched?\n");
		return NULL;
	}

	txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);

	if (plchan->cfg.device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
			PL080_FLOW_PER2MEM;

	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	for_each_sg(sgl, sg, sg_len, tmp) {
		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
		if (!dsg) {
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
					__func__);
			return NULL;
		}
		list_add_tail(&dsg->node, &txd->dsg_list);

		dsg->len = sg_dma_len(sg);
		if (direction == DMA_MEM_TO_DEV) {
			dsg->src_addr = sg_dma_address(sg);
			dsg->dst_addr = slave_addr;
		} else {
			dsg->src_addr = slave_addr;
			dsg->dst_addr = sg_dma_address(sg);
		}
	}

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}

static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		return dma_set_runtime_config(chan,
					      (struct dma_slave_config *)arg);
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			pl08x_terminate_phy_chan(pl08x, plchan->phychan);

			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			release_phy_channel(plchan);
			plchan->phychan_hold = 0;
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_free_txd(pl08x, plchan->at);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return ret;
}

bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan;
	char *name = chan_id;

	/* Reject channels for devices not bound to this driver */
	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
		return false;

	plchan = to_pl08x_chan(chan);

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}
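
/*
 * Illustrative client-side sketch (not part of this file): claiming a
 * channel by name through this filter. The channel name is hypothetical.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
 */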

/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off. That will save some
 * power. Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	/* The Nomadik variant does not have the config register */
	if (pl08x->vd->nomadik)
		return;
	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}

static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct device *dev = txd->tx.chan->device->dev;
	struct pl08x_sg *dsg;

	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		else {
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		}
	}
	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
		else
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
	}
}

static void pl08x_tasklet(unsigned long data)
{
	struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);

	txd = plchan->at;
	plchan->at = NULL;

	if (txd) {
		/* Update last completed */
		dma_cookie_complete(&txd->tx);
	}

	/* If a new descriptor is queued, set it up; plchan->at is NULL here */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);

		pl08x_start_txd(plchan, next);
	} else if (plchan->phychan_hold) {
		/*
		 * This channel is still in use - we have a new txd being
		 * prepared and will soon be queued.  Don't give up the
		 * physical channel.
		 */
	} else {
		struct pl08x_dma_chan *waiting = NULL;

		/*
		 * No more jobs, so free up the physical channel
		 * Free any allocated signal on slave transfers too
		 */
		release_phy_channel(plchan);
		plchan->state = PL08X_CHAN_IDLE;

		/*
		 * And NOW before anyone else can grab that freed-up
		 * physical channel, see if there is some memcpy pending
		 * that seriously needs to start because of being stacked
		 * up while we were choking the physical channels with data.
		 */
		list_for_each_entry(waiting, &pl08x->memcpy.channels,
				    chan.device_node) {
			if (waiting->state == PL08X_CHAN_WAITING &&
				waiting->waiting != NULL) {
				int ret;

				/* This should REALLY not fail now */
				ret = prep_phy_channel(waiting,
						       waiting->waiting);
				BUG_ON(ret);
				waiting->phychan_hold--;
				waiting->state = PL08X_CHAN_RUNNING;
				waiting->waiting = NULL;
				pl08x_issue_pending(&waiting->chan);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	if (txd) {
		dma_async_tx_callback callback = txd->tx.callback;
		void *callback_param = txd->tx.callback_param;

		/* Don't try to unmap buffers on slave channels */
		if (!plchan->slave)
			pl08x_unmap_buffers(txd);

		/* Free the descriptor */
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);

		/* Callback to signal completion */
		if (callback)
			callback(callback_param);
	}
}

static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0, err, tc, i;

	/* check & clear - ERR & TC interrupts */
	err = readl(pl08x->base + PL080_ERR_STATUS);
	if (err) {
		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
			__func__, err);
		writel(err, pl08x->base + PL080_ERR_CLEAR);
	}
	tc = readl(pl08x->base + PL080_TC_STATUS);
	if (tc)
		writel(tc, pl08x->base + PL080_TC_CLEAR);

	if (!err && !tc)
		return IRQ_NONE;

	for (i = 0; i < pl08x->vd->channels; i++) {
		if (((1 << i) & err) || ((1 << i) & tc)) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;

			if (!plchan) {
				dev_err(&pl08x->adev->dev,
					"%s Error TC interrupt on unused channel: 0x%08x\n",
					__func__, i);
				continue;
			}

			/* Schedule tasklet on this channel */
			tasklet_schedule(&plchan->tasklet);
			mask |= (1 << i);
		}
	}

	return mask ? IRQ_HANDLED : IRQ_NONE;
}

1802 1803 1804 1805
static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	chan->slave = true;
	chan->name = chan->cd->bus_id;
1806 1807
	chan->cfg.src_addr = chan->cd->addr;
	chan->cfg.dst_addr = chan->cd->addr;
}
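
/*
 * Illustrative sketch only: slave channels are described by the board
 * through platform data. The bus_id, signals, address and bus below
 * are assumptions invented for the example, not from a real platform.
 */
#if 0	/* example, not compiled */
static struct pl08x_channel_data example_slave_channels[] = {
	{
		.bus_id = "uart0_tx",	/* matched by the filter function */
		.min_signal = 0,
		.max_signal = 0,
		.addr = 0x80120000,	/* hypothetical UART data register */
		.periph_buses = PL08X_AHB2,
	},
};
#endif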

/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
		struct dma_device *dmadev, unsigned int channels, bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels:
	 * we won't always be able to use all of them, but the code will
	 * have to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			dev_err(&pl08x->adev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;
		chan->signal = -1;

		if (slave) {
			chan->cd = &pl08x->pd->slave_channels[i];
			pl08x_dma_slave_init(chan);
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		dev_dbg(&pl08x->adev->dev,
			 "initialize virtual channel \"%s\"\n",
			 chan->name);

		chan->chan.device = dmadev;
		dma_cookie_init(&chan->chan);

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->pend_list);
		tasklet_init(&chan->tasklet, pl08x_tasklet,
			     (unsigned long) chan);

		list_add_tail(&chan->chan.device_node, &dmadev->channels);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}

static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, chan.device_node) {
		list_del(&chan->chan.device_node);
		kfree(chan);
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}

static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s%s\n",
			   ch->id,
			   virt_chan ? virt_chan->name : "(none)",
			   ch->locked ? " LOCKED" : "");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}
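
/*
 * Hypothetical sample of what the file renders, following the
 * seq_printf() formats above (channel names and states invented):
 *
 *	PL08x physical channels:
 *	CHANNEL:	USER:
 *	--------	-----
 *	0		memcpy0
 *	1		(none) LOCKED
 *
 *	PL08x virtual memcpy channels:
 *	CHANNEL:	STATE:
 *	--------	------
 *	memcpy0		running
 */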

static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}

static const struct file_operations pl08x_debugfs_operations = {
	.open		= pl08x_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all channels */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
			S_IFREG | S_IRUGO, NULL, pl08x,
			&pl08x_debugfs_operations);
}
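
/*
 * With debugfs mounted, the file appears in the debugfs root named
 * after the amba device, e.g. (device name assumed):
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/dmac0
 */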

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif

static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_control = pl08x_control;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_control = pl08x_control;

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		dev_err(&adev->dev, "no platform data supplied\n");
		ret = -EINVAL;
		goto out_no_platdata;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* By default, AHB1 only.  If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}
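
	/*
	 * For illustration (values assumed, not from any real board): a
	 * dual-master platform could fetch LLIs on AHB1 but run memory
	 * transfers on AHB2 by supplying, in its platform data:
	 *
	 *	.lli_buses = PL08X_AHB1,
	 *	.mem_buses = PL08X_AHB2,
	 */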

	/* A DMA memory pool for LLIs, aligned to PL08X_ALIGN bytes */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Attach the interrupt handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate physical channel holders\n",
			__func__);
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);

		/*
		 * Nomadik variants can have channels that are locked
		 * down for the secure world only. Lock up these channels
		 * by perpetually serving a dummy virtual channel.
		 */
		if (vd->nomadik) {
			u32 val;

			val = readl(ch->base + PL080_CH_CONFIG);
			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
				ch->locked = true;
			}
		}

		dev_dbg(&adev->dev, "physical channel %d is %s\n",
			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}
	pl08x->memcpy.chancnt = ret;

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
			pl08x->pd->num_slave_channels, true);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to enumerate slave channels - %d\n",
				__func__, ret);
		goto out_no_slave;
	}
	pl08x->slave.chancnt = ret;

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register memcpy as an async device - %d\n",
			__func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to register slave as an async device - %d\n",
			__func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);

	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}
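
/*
 * For orientation, a minimal client-side sketch (not part of this
 * driver; the function name, FIFO address and burst value are
 * assumptions) of driving one of the slave channels registered above:
 */
#if 0	/* example, not compiled */
static void example_slave_usage(struct dma_chan *chan,
				struct scatterlist *sgl, unsigned int sg_len,
				dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 4,	/* tune to the peripheral FIFO */
	};
	struct dma_async_tx_descriptor *desc;

	dmaengine_slave_config(chan, &cfg);
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
}
#endif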

/* The PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.channels = 8,
	.dualmaster = true,
};

static struct vendor_data vendor_nomadik = {
	.channels = 8,
	.dualmaster = true,
	.nomadik = true,
};

static struct vendor_data vendor_pl081 = {
	.channels = 2,
	.dualmaster = false,
};

static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{
		.id	= 0x00041080,
		.mask	= 0x000fffff,
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.id	= 0x00041081,
		.mask	= 0x000fffff,
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id	= 0x00280080,
		.mask	= 0x00ffffff,
		.data	= &vendor_nomadik,
	},
	{ 0, 0 },
};
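
/*
 * The AMBA core considers an entry to match when
 * (periphid & mask) == id, so the 0x000fffff masks above ignore the
 * revision nibble: e.g. a hypothetical rev 1 PL080 with periphid
 * 0x00141080 still matches the 0x00041080 entry.
 */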

MODULE_DEVICE_TABLE(amba, pl08x_ids);

static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;
	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       ": failed to register as an AMBA driver (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);