dbdma.c 29.2 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
/*
 *
 * BRIEF MODULE DESCRIPTION
 *      The Descriptor Based DMA channel manager that first appeared
 *	on the Au1550.  I started with dma.c, but I think all that is
 *	left is this initial comment :-)
 *
 * Copyright 2004 Embedded Edge, LLC
 *	dan@embeddededge.com
 *
 *  This program is free software; you can redistribute  it and/or modify it
 *  under  the terms of  the GNU General  Public License as published by the
 *  Free Software Foundation;  either version 2 of the  License, or (at your
 *  option) any later version.
 *
 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  You should have received a copy of the  GNU General Public License along
 *  with this program; if not, write  to the Free Software Foundation, Inc.,
 *  675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */
P
Pete Popov 已提交
32

33
#include <linux/init.h>
L
Linus Torvalds 已提交
34 35 36 37
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
38
#include <linux/module.h>
39
#include <linux/syscore_ops.h>
L
Linus Torvalds 已提交
40 41
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
P
Pete Popov 已提交
42

L
Linus Torvalds 已提交
43 44 45 46 47 48 49 50 51 52 53
/*
 * The Descriptor Based DMA supports up to 16 channels.
 *
 * There are 32 devices defined. We keep an internal structure
 * of devices using these channels, along with additional
 * information.
 *
 * We allocate the descriptors and allow access to them through various
 * functions.  The drivers allocate the data buffers and assign them
 * to the descriptors.
 */

/* Protects the device table flags and the channel table during allocation. */
static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
L
Linus Torvalds 已提交
55

56
/*
 * Round an address up to the next multiple of 'a' ('a' must be a power
 * of two).  Both macro arguments are fully parenthesized so that
 * non-atomic expressions (e.g. a ternary) expand correctly.
 */
#define ALIGN_ADDR(x, a)	((((u32)(x)) + ((a) - 1)) & ~((a) - 1))

59 60
/* Global DBDMA configuration block, accessed uncached through KSEG1. */
static dbdma_global_t *dbdma_gptr =
			(dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
/* Non-zero once setup has completed; channel allocation fails until then. */
static int dbdma_initialized;
L
Linus Torvalds 已提交
62

63 64 65
/* Runtime device table; points at a copy of one of the per-SoC tables
 * below (selected elsewhere during driver initialization). */
static dbdev_tab_t *dbdev_tab;

/*
 * Au1550 DMA request sources.
 * NOTE(review): initializer order appears to be { dev_id, dev_flags,
 * dev_tsize, dev_devwidth, dev_physaddr, dev_intlevel, dev_intpolarity }
 * -- confirm against dbdev_tab_t in au1xxx_dbdma.h.
 */
static dbdev_tab_t au1550_dbdev_tab[] __initdata = {
	/* UARTS */
	{ AU1550_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
	{ AU1550_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN,  0, 8, 0x11100000, 0, 0 },
	{ AU1550_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 },
	{ AU1550_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN,  0, 8, 0x11400000, 0, 0 },

	/* EXT DMA */
	{ AU1550_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_DMA_REQ2, 0, 0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_DMA_REQ3, 0, 0, 0, 0x00000000, 0, 0 },

	/* USB DEV */
	{ AU1550_DSCR_CMD0_USBDEV_RX0, DEV_FLAGS_IN,  4, 8, 0x10200000, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_TX0, DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_TX1, DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_TX2, DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_RX3, DEV_FLAGS_IN,  4, 8, 0x10200010, 0, 0 },
	{ AU1550_DSCR_CMD0_USBDEV_RX4, DEV_FLAGS_IN,  4, 8, 0x10200014, 0, 0 },

	/* PSCs */
	{ AU1550_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN,  0, 0, 0x11a0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN,  0, 0, 0x11b0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN,  0, 0, 0x10a0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 },
	{ AU1550_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN,  0, 0, 0x10b0001c, 0, 0 },

	{ AU1550_DSCR_CMD0_PCI_WRITE,  0, 0, 0, 0x00000000, 0, 0 },  /* PCI */
	{ AU1550_DSCR_CMD0_NAND_FLASH, 0, 0, 0, 0x00000000, 0, 0 }, /* NAND */

	/* MAC 0 */
	{ AU1550_DSCR_CMD0_MAC0_RX, DEV_FLAGS_IN,  0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_MAC0_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },

	/* MAC 1 */
	{ AU1550_DSCR_CMD0_MAC1_RX, DEV_FLAGS_IN,  0, 0, 0x00000000, 0, 0 },
	{ AU1550_DSCR_CMD0_MAC1_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_ALWAYS,   DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
};
L
Linus Torvalds 已提交
110

111 112 113 114 115
/* Au1200 DMA request sources (same field layout as au1550_dbdev_tab). */
static dbdev_tab_t au1200_dbdev_tab[] __initdata = {
	/* UARTs */
	{ AU1200_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
	{ AU1200_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN,  0, 8, 0x11100000, 0, 0 },
	{ AU1200_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x11200004, 0, 0 },
	{ AU1200_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN,  0, 8, 0x11200000, 0, 0 },

	/* EXT DMA */
	{ AU1200_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },

	/* MAE (media acceleration engine) and LCD */
	{ AU1200_DSCR_CMD0_MAE_BE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_MAE_FE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	/* SD/MMC */
	{ AU1200_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
	{ AU1200_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN,  4, 8, 0x10600004, 0, 0 },
	{ AU1200_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 },
	{ AU1200_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN,  4, 8, 0x10680004, 0, 0 },

	/* AES */
	{ AU1200_DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 },
	{ AU1200_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },

	/* PSCs */
	{ AU1200_DSCR_CMD0_PSC0_TX,   DEV_FLAGS_OUT, 0, 16, 0x11a0001c, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC0_RX,   DEV_FLAGS_IN,  0, 16, 0x11a0001c, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC0_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC1_TX,   DEV_FLAGS_OUT, 0, 16, 0x11b0001c, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC1_RX,   DEV_FLAGS_IN,  0, 16, 0x11b0001c, 0, 0 },
	{ AU1200_DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	/* Camera interface */
	{ AU1200_DSCR_CMD0_CIM_RXA,  DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 },
	{ AU1200_DSCR_CMD0_CIM_RXB,  DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 },
	{ AU1200_DSCR_CMD0_CIM_RXC,  DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 },
	{ AU1200_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	{ AU1200_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_ALWAYS,   DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
};

151 152
/* 32 predefined plus 32 custom */
#define DBDEV_TAB_SIZE		64

/* One entry per hardware channel; a NULL slot means the channel is free. */
static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];

156
/*
 * Look up the device table entry with the given device id.
 * Returns NULL when no entry matches.  (Passing ~0 finds a free slot.)
 */
static dbdev_tab_t *find_dbdev_id(u32 id)
{
	dbdev_tab_t *entry = dbdev_tab;
	int idx;

	for (idx = 0; idx < DBDEV_TAB_SIZE; idx++, entry++)
		if (entry->dev_id == id)
			return entry;

	return NULL;
}

168
/* Translate a descriptor's hardware next-pointer into a kernel virtual
 * address, so callers can walk the descriptor ring in software. */
void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
{
	return phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
}
EXPORT_SYMBOL(au1xxx_ddma_get_nextptr_virt);

174
/*
 * Register a caller-supplied device in the first free slot of the
 * device table (a free slot has dev_id == ~0).  Returns the newly
 * assigned custom device id, or 0 if the table is full.
 */
u32 au1xxx_ddma_add_device(dbdev_tab_t *dev)
{
	u32 ret = 0;
	dbdev_tab_t *p;
	/* NOTE(review): new_id is not protected by any lock here --
	 * concurrent callers could race; confirm callers serialize. */
	static u16 new_id = 0x1000;

	p = find_dbdev_id(~0);	/* find an unused slot */
	if (NULL != p) {
		memcpy(p, dev, sizeof(dbdev_tab_t));
		/* Build a unique id from the counter and the base device id. */
		p->dev_id = DSCR_DEV2CUSTOM_ID(new_id, dev->dev_id);
		ret = p->dev_id;
		new_id++;
#if 0
		printk(KERN_DEBUG "add_device: id:%x flags:%x padd:%x\n",
				  p->dev_id, p->dev_flags, p->dev_physaddr);
#endif
	}

	return ret;
}
EXPORT_SYMBOL(au1xxx_ddma_add_device);

196 197 198 199 200 201 202 203 204 205 206
/*
 * Remove a device entry previously added with au1xxx_ddma_add_device():
 * clear the slot and mark it free again (dev_id == ~0).
 */
void au1xxx_ddma_del_device(u32 devid)
{
	dbdev_tab_t *entry = find_dbdev_id(devid);

	if (entry == NULL)
		return;

	memset(entry, 0, sizeof(dbdev_tab_t));
	entry->dev_id = ~0;	/* ~0 marks an unused slot */
}
EXPORT_SYMBOL(au1xxx_ddma_del_device);

207 208
/* Allocate a channel and return a non-zero descriptor if successful.
 *
 * Claims both endpoint devices (srcid/destid), grabs a free hardware
 * channel slot, and programs the channel's configuration register.
 * The returned handle is the address of the chan_tab_ptr[] slot; all
 * other APIs dereference it as *(chan_tab_t **)chanid.
 * Returns 0 on any failure (not initialized, unknown device, device
 * busy, no free channel, or allocation failure).
 */
u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
       void (*callback)(int, void *), void *callparam)
{
	unsigned long   flags;
	u32		used, chan;
	u32		dcp;
	int		i;
	dbdev_tab_t	*stp, *dtp;
	chan_tab_t	*ctp;
	au1x_dma_chan_t *cp;

	/*
	 * We do the initialization on the first channel allocation.
	 * We have to wait because of the interrupt handler initialization
	 * which can't be done successfully during board set up.
	 */
	if (!dbdma_initialized)
		return 0;

	stp = find_dbdev_id(srcid);
	if (stp == NULL)
		return 0;
	dtp = find_dbdev_id(destid);
	if (dtp == NULL)
		return 0;

	used = 0;

	/* Check to see if we can get both channels. */
	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
	if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
	     (stp->dev_flags & DEV_FLAGS_ANYUSE)) {
		/* Got source */
		stp->dev_flags |= DEV_FLAGS_INUSE;
		if (!(dtp->dev_flags & DEV_FLAGS_INUSE) ||
		     (dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
			/* Got destination */
			dtp->dev_flags |= DEV_FLAGS_INUSE;
		} else {
			/* Can't get dest.  Release src. */
			stp->dev_flags &= ~DEV_FLAGS_INUSE;
			used++;
		}
	} else
		used++;
	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);

	if (used)
		return 0;

	/* Let's see if we can allocate a channel for it. */
	ctp = NULL;
	chan = 0;
	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
	for (i = 0; i < NUM_DBDMA_CHANS; i++)
		if (chan_tab_ptr[i] == NULL) {
			/*
			 * If kmalloc fails, it is caught below same
			 * as a channel not available.
			 */
			ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
			chan_tab_ptr[i] = ctp;
			break;
		}
	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);

	if (ctp != NULL) {
		memset(ctp, 0, sizeof(chan_tab_t));
		ctp->chan_index = chan = i;
		/* Per-channel register blocks are 0x100 apart, uncached. */
		dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
		dcp += (0x0100 * chan);
		ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
		cp = (au1x_dma_chan_t *)dcp;
		ctp->chan_src = stp;
		ctp->chan_dest = dtp;
		ctp->chan_callback = callback;
		ctp->chan_callparam = callparam;

		/* Initialize channel configuration. */
		i = 0;
		if (stp->dev_intlevel)
			i |= DDMA_CFG_SED;
		if (stp->dev_intpolarity)
			i |= DDMA_CFG_SP;
		if (dtp->dev_intlevel)
			i |= DDMA_CFG_DED;
		if (dtp->dev_intpolarity)
			i |= DDMA_CFG_DP;
		if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
			(dtp->dev_flags & DEV_FLAGS_SYNC))
				i |= DDMA_CFG_SYNC;
		cp->ddma_cfg = i;
		au_sync();

		/*
		 * Return a non-zero value that can be used to find the channel
		 * information in subsequent operations.
		 */
		return (u32)(&chan_tab_ptr[chan]);
	}

	/* Release devices */
	stp->dev_flags &= ~DEV_FLAGS_INUSE;
	dtp->dev_flags &= ~DEV_FLAGS_INUSE;

	return 0;
}
EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);
L
Linus Torvalds 已提交
316

317 318
/*
 * Set the device width if source or destination is a FIFO.
L
Linus Torvalds 已提交
319 320
 * Should be 8, 16, or 32 bits.
 */
321
u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
L
Linus Torvalds 已提交
322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342
{
	u32		rv;
	chan_tab_t	*ctp;
	dbdev_tab_t	*stp, *dtp;

	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;
	rv = 0;

	if (stp->dev_flags & DEV_FLAGS_IN) {	/* Source in fifo */
		rv = stp->dev_devwidth;
		stp->dev_devwidth = bits;
	}
	if (dtp->dev_flags & DEV_FLAGS_OUT) {	/* Destination out fifo */
		rv = dtp->dev_devwidth;
		dtp->dev_devwidth = bits;
	}

	return rv;
}
P
Pete Popov 已提交
343
EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth);
L
Linus Torvalds 已提交
344

345 346
/* Allocate a descriptor ring, initializing as much as possible.
 *
 * The ring is a circular singly-linked list of 32-byte-aligned
 * hardware descriptors.  Each descriptor is pre-loaded with the
 * command words derived from the channel's source/destination device
 * attributes; callers then supply buffers via the put_* functions.
 * Returns the (virtual) address of the first descriptor, or 0 on
 * allocation failure.
 */
u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
{
	int			i;
	u32			desc_base, srcid, destid;
	u32			cmd0, cmd1, src1, dest1;
	u32			src0, dest0;
	chan_tab_t		*ctp;
	dbdev_tab_t		*stp, *dtp;
	au1x_ddma_desc_t	*dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;

	/*
	 * The descriptors must be 32-byte aligned.  There is a
	 * possibility the allocation will give us such an address,
	 * and if we try that first we are likely to not waste larger
	 * slabs of memory.
	 */
	desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t),
				 GFP_KERNEL|GFP_DMA);
	if (desc_base == 0)
		return 0;

	if (desc_base & 0x1f) {
		/*
		 * Lost....do it again, allocate extra, and round
		 * the address base.
		 */
		kfree((const void *)desc_base);
		i = entries * sizeof(au1x_ddma_desc_t);
		i += (sizeof(au1x_ddma_desc_t) - 1);
		desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
		if (desc_base == 0)
			return 0;

		/* cdb_membase keeps the unaligned address for kfree later. */
		ctp->cdb_membase = desc_base;
		desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
	} else
		ctp->cdb_membase = desc_base;

	dp = (au1x_ddma_desc_t *)desc_base;

	/* Keep track of the base descriptor. */
	ctp->chan_desc_base = dp;

	/* Initialize the rings with as much information as we know. */
	srcid = stp->dev_id;
	destid = dtp->dev_id;

	cmd0 = cmd1 = src1 = dest1 = 0;
	src0 = dest0 = 0;

	cmd0 |= DSCR_CMD0_SID(srcid);
	cmd0 |= DSCR_CMD0_DID(destid);
	cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
	cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_NOCHANGE);

	/* Is it mem to mem transfer? */
	if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
	     (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
	    ((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
	     (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
		cmd0 |= DSCR_CMD0_MEM;

	/* Source transfer width (defaults to 32-bit). */
	switch (stp->dev_devwidth) {
	case 8:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
		break;
	case 16:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
		break;
	case 32:
	default:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
		break;
	}

	/* Destination transfer width (defaults to 32-bit). */
	switch (dtp->dev_devwidth) {
	case 8:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
		break;
	case 16:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
		break;
	case 32:
	default:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
		break;
	}

	/*
	 * If the device is marked as an in/out FIFO, ensure it is
	 * set non-coherent.
	 */
	if (stp->dev_flags & DEV_FLAGS_IN)
		cmd0 |= DSCR_CMD0_SN;		/* Source in FIFO */
	if (dtp->dev_flags & DEV_FLAGS_OUT)
		cmd0 |= DSCR_CMD0_DN;		/* Destination out FIFO */

	/*
	 * Set up source1.  For now, assume no stride and increment.
	 * A channel attribute update can change this later.
	 */
	switch (stp->dev_tsize) {
	case 1:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
		break;
	case 2:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
		break;
	case 4:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
		break;
	case 8:
	default:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
		break;
	}

	/* If source input is FIFO, set static address.	*/
	if (stp->dev_flags & DEV_FLAGS_IN) {
		if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
			src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
		else
			src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
	}

	if (stp->dev_physaddr)
		src0 = stp->dev_physaddr;

	/*
	 * Set up dest1.  For now, assume no stride and increment.
	 * A channel attribute update can change this later.
	 */
	switch (dtp->dev_tsize) {
	case 1:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
		break;
	case 2:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
		break;
	case 4:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
		break;
	case 8:
	default:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
		break;
	}

	/* If destination output is FIFO, set static address. */
	if (dtp->dev_flags & DEV_FLAGS_OUT) {
		if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
		else
			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
	}

	if (dtp->dev_physaddr)
		dest0 = dtp->dev_physaddr;

#if 0
		printk(KERN_DEBUG "did:%x sid:%x cmd0:%x cmd1:%x source0:%x "
				  "source1:%x dest0:%x dest1:%x\n",
				  dtp->dev_id, stp->dev_id, cmd0, cmd1, src0,
				  src1, dest0, dest1);
#endif
	for (i = 0; i < entries; i++) {
		dp->dscr_cmd0 = cmd0;
		dp->dscr_cmd1 = cmd1;
		dp->dscr_source0 = src0;
		dp->dscr_source1 = src1;
		dp->dscr_dest0 = dest0;
		dp->dscr_dest1 = dest1;
		dp->dscr_stat = 0;
		dp->sw_context = 0;
		dp->sw_status = 0;
		dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
		dp++;
	}

	/* Make last descriptor point to the first. */
	dp--;
	dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

	return (u32)ctp->chan_desc_base;
}
EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
L
Linus Torvalds 已提交
541

542 543
/*
 * Put a source buffer into the DMA ring.
 * This updates the source pointer and byte count.  Normally used
 * for memory to fifo transfers.
 * Returns nbytes on success, 0 if the ring is full (next descriptor
 * is still owned by the hardware).
 */
u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
	chan_tab_t		*ctp;
	au1x_ddma_desc_t	*dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *(chan_tab_t **)chanid;

	/*
	 * We should have multiple callers for a particular channel,
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer address and byte count. */
	dp->dscr_source0 = buf & ~0UL;
	dp->dscr_cmd1 = nbytes;
	/* Check flags */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	/*
	 * There is an errata on the Au1200/Au1550 parts that could result
	 * in "stale" data being DMA'ed. It has to do with the snoop logic on
	 * the cache eviction buffer.  DMA_NONCOHERENT is on by default for
	 * these parts. If it is fixed in the future, these dma_cache_inv will
	 * just be nothing more than empty macros. See io.h.
	 */
	dma_cache_wback_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	au_sync();
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	/* Ring the doorbell so the engine picks up the new descriptor. */
	ctp->chan_ptr->ddma_dbell = 0;

	/* Get next descriptor pointer.	*/
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}
EXPORT_SYMBOL(au1xxx_dbdma_put_source);
L
Linus Torvalds 已提交
601 602 603 604 605

/* Put a destination buffer into the DMA ring.
 * This updates the destination pointer and byte count.  Normally used
 * to place an empty buffer into the ring for fifo to memory transfers.
 * Returns nbytes on success, 0 if the ring is full.
 */
u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
	chan_tab_t		*ctp;
	au1x_ddma_desc_t	*dp;

	/* I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/* We should have multiple callers for a particular channel,
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/* If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer address and byte count */

	/* Check flags  */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	dp->dscr_dest0 = buf & ~0UL;
	dp->dscr_cmd1 = nbytes;
#if 0
	printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
			  dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
			  dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
#endif
	/*
	 * There is an errata on the Au1200/Au1550 parts that could result in
	 * "stale" data being DMA'ed. It has to do with the snoop logic on the
	 * cache eviction buffer.  DMA_NONCOHERENT is on by default for these
	 * parts. If it is fixed in the future, these dma_cache_inv will just
	 * be nothing more than empty macros. See io.h.
	 */
	dma_cache_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	au_sync();
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	/* Ring the doorbell so the engine picks up the new descriptor. */
	ctp->chan_ptr->ddma_dbell = 0;

	/* Get next descriptor pointer.	*/
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}
EXPORT_SYMBOL(au1xxx_dbdma_put_dest);
L
Linus Torvalds 已提交
663

664 665
/*
 * Get a destination buffer into the DMA ring.
 * Normally used to get a full buffer from the ring during fifo
 * to memory transfers.  This does not set the valid bit, you will
 * have to put another destination buffer to keep the DMA going.
 * Returns the descriptor's status word (non-zero) on success, 0 if
 * the next descriptor is still owned by the hardware.
 */
u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
{
	chan_tab_t		*ctp;
	au1x_ddma_desc_t	*dp;
	u32			rv;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/*
	 * We should have multiple callers for a particular channel,
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->get_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Return buffer address and byte count. */
	*buf = (void *)(phys_to_virt(dp->dscr_dest0));
	*nbytes = dp->dscr_cmd1;
	rv = dp->dscr_stat;

	/* Get next descriptor pointer.	*/
	ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return rv;
}
EXPORT_SYMBOL_GPL(au1xxx_dbdma_get_dest);

709
/*
 * Halt a channel: disable it, poll (up to ~100us) for the hardware
 * halt bit, then clear the current-descriptor-valid and doorbell state.
 */
void au1xxx_dbdma_stop(u32 chanid)
{
	chan_tab_t	*ctp;
	au1x_dma_chan_t *cp;
	int halt_timeout = 0;

	ctp = *((chan_tab_t **)chanid);

	cp = ctp->chan_ptr;
	cp->ddma_cfg &= ~DDMA_CFG_EN;	/* Disable channel */
	au_sync();
	while (!(cp->ddma_stat & DDMA_STAT_H)) {
		udelay(1);
		halt_timeout++;
		if (halt_timeout > 100) {
			printk(KERN_WARNING "warning: DMA channel won't halt\n");
			break;
		}
	}
	/* clear current desc valid and doorbell */
	cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V);
	au_sync();
}
EXPORT_SYMBOL(au1xxx_dbdma_stop);
L
Linus Torvalds 已提交
733

734 735 736
/*
 * Start using the current descriptor pointer.  If the DBDMA encounters
 * a non-valid descriptor, it will stop.  In this case, we can just
 * continue by adding a buffer to the list and starting again.
 */
void au1xxx_dbdma_start(u32 chanid)
{
	chan_tab_t	*ctp;
	au1x_dma_chan_t *cp;

	ctp = *((chan_tab_t **)chanid);
	cp = ctp->chan_ptr;
	/* Point the hardware at the current descriptor, then enable. */
	cp->ddma_desptr = virt_to_phys(ctp->cur_ptr);
	cp->ddma_cfg |= DDMA_CFG_EN;	/* Enable channel */
	au_sync();
	/* Ring the doorbell to kick off processing. */
	cp->ddma_dbell = 0;
	au_sync();
}
EXPORT_SYMBOL(au1xxx_dbdma_start);
L
Linus Torvalds 已提交
753

754
/*
 * Stop the channel, rewind get/put/cur pointers to the ring base, and
 * mark every descriptor invalid so the ring can be refilled from scratch.
 */
void au1xxx_dbdma_reset(u32 chanid)
{
	chan_tab_t		*ctp;
	au1x_ddma_desc_t	*dp;

	au1xxx_dbdma_stop(chanid);

	ctp = *((chan_tab_t **)chanid);
	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

	/* Run through the descriptors and reset the valid indicator. */
	dp = ctp->chan_desc_base;

	do {
		dp->dscr_cmd0 &= ~DSCR_CMD0_V;
		/*
		 * Reset our software status -- this is used to determine
		 * if a descriptor is in use by upper level software. Since
		 * posting can reset 'V' bit.
		 */
		dp->sw_status = 0;
		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	} while (dp != ctp->chan_desc_base);
}
EXPORT_SYMBOL(au1xxx_dbdma_reset);
L
Linus Torvalds 已提交
779

780
/*
 * Return the channel's byte-count register, i.e. the number of bytes
 * not yet transferred.  Only meaningful while the channel is stopped.
 */
u32 au1xxx_get_dma_residue(u32 chanid)
{
	chan_tab_t *ctp = *((chan_tab_t **)chanid);
	au1x_dma_chan_t *cp = ctp->chan_ptr;
	u32 residue;

	/* This is only valid if the channel is stopped. */
	residue = cp->ddma_bytecnt;
	au_sync();

	return residue;
}
EXPORT_SYMBOL_GPL(au1xxx_get_dma_residue);

797
/*
 * Release a channel: halt the DMA, free the descriptor ring and the
 * channel bookkeeping, and mark both endpoint devices available again.
 */
void au1xxx_dbdma_chan_free(u32 chanid)
{
	chan_tab_t *ctp = *((chan_tab_t **)chanid);
	dbdev_tab_t *src = ctp->chan_src;
	dbdev_tab_t *dst = ctp->chan_dest;

	au1xxx_dbdma_stop(chanid);

	/* Free the (possibly over-allocated, unaligned) ring memory. */
	kfree((void *)ctp->cdb_membase);

	src->dev_flags &= ~DEV_FLAGS_INUSE;
	dst->dev_flags &= ~DEV_FLAGS_INUSE;
	chan_tab_ptr[ctp->chan_index] = NULL;

	kfree(ctp);
}
EXPORT_SYMBOL(au1xxx_dbdma_chan_free);
L
Linus Torvalds 已提交
817

818
/*
 * DBDMA interrupt handler: acknowledge the lowest pending channel's
 * interrupt, invoke its callback (if any), and advance cur_ptr to the
 * next descriptor.  'dev_id' is unused.
 */
static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
{
	u32 intstat;
	u32 chan_index;
	chan_tab_t		*ctp;
	au1x_ddma_desc_t	*dp;
	au1x_dma_chan_t *cp;

	intstat = dbdma_gptr->ddma_intstat;
	au_sync();
	chan_index = __ffs(intstat);	/* lowest pending channel */

	ctp = chan_tab_ptr[chan_index];
	cp = ctp->chan_ptr;
	dp = ctp->cur_ptr;

	/* Reset interrupt. */
	cp->ddma_irq = 0;
	au_sync();

	if (ctp->chan_callback)
		ctp->chan_callback(irq, ctp->chan_callparam);

	ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	return IRQ_RETVAL(1);
}

845
/*
 * Debug helper: dump the channel table entry, the channel's hardware
 * registers and every descriptor in its ring at KERN_DEBUG level.
 */
void au1xxx_dbdma_dump(u32 chanid)
{
	chan_tab_t	 *ctp;
	au1x_ddma_desc_t *dp;
	dbdev_tab_t	 *stp, *dtp;
	au1x_dma_chan_t  *cp;
	u32 i		 = 0;

	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;
	cp = ctp->chan_ptr;

	printk(KERN_DEBUG "Chan %x, stp %x (dev %d)  dtp %x (dev %d)\n",
			  (u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
			  dtp - dbdev_tab);
	printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
			  (u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
			  (u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));

	printk(KERN_DEBUG "dbdma chan %x\n", (u32)cp);
	printk(KERN_DEBUG "cfg %08x, desptr %08x, statptr %08x\n",
			  cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
	printk(KERN_DEBUG "dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
			  cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat,
			  cp->ddma_bytecnt);

	/* Run through the descriptors */
	dp = ctp->chan_desc_base;

	do {
		printk(KERN_DEBUG "Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
				  i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
		printk(KERN_DEBUG "src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
				  dp->dscr_source0, dp->dscr_source1,
				  dp->dscr_dest0, dp->dscr_dest1);
		printk(KERN_DEBUG "stat %08x, nxtptr %08x\n",
				  dp->dscr_stat, dp->dscr_nxtptr);
		/* The ring is circular: stop once back at the base descriptor. */
		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	} while (dp != ctp->chan_desc_base);
}

P
Pete Popov 已提交
887 888 889
/* Put a descriptor into the DMA ring.
 * This updates the source/destination pointers and byte count.
 *
 * Returns the byte count loaded into the descriptor (non-zero) on
 * success, or 0 if the put descriptor is still owned by the engine.
 */
u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;
	u32 nbytes = 0;

	/*
	 * I guess we could check this to be within the
	 * range of the table......
	 */
	ctp = *((chan_tab_t **)chanid);

	/*
	 * We should have multiple callers for a particular channel,
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer addresses and byte count. */
	dp->dscr_dest0 = dscr->dscr_dest0;
	dp->dscr_source0 = dscr->dscr_source0;
	dp->dscr_dest1 = dscr->dscr_dest1;
	dp->dscr_source1 = dscr->dscr_source1;
	dp->dscr_cmd1 = dscr->dscr_cmd1;
	nbytes = dscr->dscr_cmd1;
	/* Allow the caller to specify if an interrupt is generated */
	dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
	/* Setting the valid bit hands the descriptor to the engine;
	 * the doorbell write below notifies it of the new work. */
	dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
	ctp->chan_ptr->ddma_dbell = 0;

	/* Get next descriptor pointer.	*/
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}

935

936
/* Suspend/resume register save area: row 0 holds the four global DBDMA
 * configuration registers, rows 1..NUM_DBDMA_CHANS hold the six
 * registers of each channel (see alchemy_dbdma_suspend/resume). */
static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6];
937

938
/*
 * Save DBDMA controller state for system suspend: the four global
 * configuration registers first, then six registers per channel.
 * Each channel is halted and the halt is confirmed before its state is
 * captured; channel interrupts are disabled last.  Always returns 0.
 */
static int alchemy_dbdma_suspend(void)
{
	int i;
	void __iomem *addr;

	/* Global configuration registers. */
	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
	alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00);
	alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04);
	alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08);
	alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c);

	/* save channel configurations */
	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
	for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
		alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00);
		alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04);
		alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08);
		alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c);
		alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10);
		alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14);

		/* halt channel */
		__raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00);
		wmb();
		/* Busy-wait until bit 0 of the register at +0x14 is set —
		 * presumably the halted-status bit; confirm against the
		 * Au1550 DBDMA register map. */
		while (!(__raw_readl(addr + 0x14) & 1))
			wmb();

		addr += 0x100;	/* next channel base */
	}
	/* disable channel interrupts */
	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
	__raw_writel(0, addr + 0x0c);
	wmb();

	return 0;
}

975
/*
 * Restore the DBDMA state captured by alchemy_dbdma_suspend(): global
 * configuration registers first, then each channel's register set.
 */
static void alchemy_dbdma_resume(void)
{
	int i;
	void __iomem *addr;

	/* Global configuration registers. */
	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
	__raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00);
	__raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04);
	__raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08);
	__raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c);

	/* restore channel configurations */
	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
	for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
		__raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00);
		__raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04);
		__raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08);
		__raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c);
		__raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10);
		__raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14);
		wmb();
		addr += 0x100;	/* next channel base */
	}
}

1000
/* Hook DBDMA register save/restore into system-core suspend/resume. */
static struct syscore_ops alchemy_dbdma_syscore_ops = {
	.suspend	= alchemy_dbdma_suspend,
	.resume		= alchemy_dbdma_resume,
};

1005
/*
 * One-time controller setup: allocate and populate the device table,
 * reset the global DBDMA configuration, enable all channel interrupts
 * and install the shared interrupt handler.
 *
 * @irq:     DBDMA interrupt line for this SoC.
 * @idtable: SoC-specific table of the first 32 device entries.
 *
 * Returns 0 on success or a negative errno.  Fixes the original code's
 * leak of dbdev_tab when request_irq() fails.
 */
static int __init dbdma_setup(unsigned int irq, dbdev_tab_t *idtable)
{
	int ret;

	/* kcalloc: zeroed, overflow-checked n * size allocation. */
	dbdev_tab = kcalloc(DBDEV_TAB_SIZE, sizeof(dbdev_tab_t), GFP_KERNEL);
	if (!dbdev_tab)
		return -ENOMEM;

	/* First 32 slots come from the SoC table; remaining slots are
	 * free entries, marked with an invalid device id. */
	memcpy(dbdev_tab, idtable, 32 * sizeof(dbdev_tab_t));
	for (ret = 32; ret < DBDEV_TAB_SIZE; ret++)
		dbdev_tab[ret].dev_id = ~0;

	dbdma_gptr->ddma_config = 0;
	dbdma_gptr->ddma_throttle = 0;
	dbdma_gptr->ddma_inten = 0xffff;
	au_sync();

	ret = request_irq(irq, dbdma_interrupt, IRQF_DISABLED, "dbdma",
			  (void *)dbdma_gptr);
	if (ret) {
		printk(KERN_ERR "Cannot grab DBDMA interrupt!\n");
		/* Don't leak the device table on failure. */
		kfree(dbdev_tab);
		dbdev_tab = NULL;
	} else {
		dbdma_initialized = 1;
		register_syscore_ops(&alchemy_dbdma_syscore_ops);
	}

	return ret;
}

1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044
/*
 * Boot-time entry point: configure the DBDMA controller on SoCs that
 * have one (Au1550, Au1200); a no-op on everything else.
 */
static int __init alchemy_dbdma_init(void)
{
	int cputype = alchemy_get_cputype();

	if (cputype == ALCHEMY_CPU_AU1550)
		return dbdma_setup(AU1550_DDMA_INT, au1550_dbdev_tab);
	if (cputype == ALCHEMY_CPU_AU1200)
		return dbdma_setup(AU1200_DDMA_INT, au1200_dbdev_tab);

	return 0;
}
subsys_initcall(alchemy_dbdma_init);