/*
 * linux/include/asm/dma.h: Defines for using and allocating dma channels.
 * Written by Hennus Bergman, 1992.
 * High DMA channel support & info by Hannu Savolainen
 * and John Boyd, Nov. 1992.
 */

#ifndef _ASM_X86_DMA_H
#define _ASM_X86_DMA_H

#include <linux/spinlock.h>	/* And spinlocks */
#include <asm/io.h>		/* need byte IO */
#include <linux/delay.h>

#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb	outb_p
#else
#define dma_outb	outb
#endif

#define dma_inb		inb

/*
 * NOTES about DMA transfers:
 *
 *  controller 1: channels 0-3, byte operations, ports 00-1F
 *  controller 2: channels 4-7, word operations, ports C0-DF
 *
 *  - ALL registers are 8 bits only, regardless of transfer size
 *  - channel 4 is not used - cascades 1 into 2.
 *  - channels 0-3 are byte - addresses/counts are for physical bytes
 *  - channels 5-7 are word - addresses/counts are for physical words
 *  - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
 *  - transfer count loaded to registers is 1 less than actual count
 *  - controller 2 offsets are all even (2x offsets for controller 1)
 *  - page registers for 5-7 don't use data bit 0, represent 128K pages
 *  - page registers for 0-3 use bit 0, represent 64K pages
 *
 * DMA transfers are limited to the lower 16MB of _physical_ memory.
 * Note that addresses loaded into registers must be _physical_ addresses,
 * not logical addresses (which may differ if paging is active).
 *
 *  Address mapping for channels 0-3:
 *
 *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
 *    |  ...  |   |  ... |   |  ... |
 *    |  ...  |   |  ... |   |  ... |
 *    |  ...  |   |  ... |   |  ... |
 *   P7  ...  P0  A7 ... A0  A7 ... A0
 * |    Page    | Addr MSB | Addr LSB |   (DMA registers)
 *
 *  Address mapping for channels 5-7:
 *
 *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
 *    |  ...  |   \   \   ... \  \  \  ... \  \
 *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
 *    |  ...  |     \   \   ... \  \  \  ... \
 *   P7  ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0
 * |      Page      |  Addr MSB   |  Addr LSB  |   (DMA registers)
 *
 * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
 * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
 * the hardware level, so odd-byte transfers aren't possible).
 *
 * Transfer count (_not # bytes_) is limited to 64K, represented as actual
 * count - 1 : 64K => 0xFFFF, 1 => 0x0000.  Thus, count is always 1 or more,
 * and up to 128K bytes may be transferred on channels 5-7 in one operation.
 *
 */
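
/*
 * Worked example (illustrative only): to program physical address 0x123456
 * on a byte channel (0-3), the page register gets 0x12, the address MSB
 * register 0x34 and the address LSB register 0x56.  On a word channel
 * (5-7) the address is shifted right by one bit first, so the same
 * 0x123456 gives page 0x12 (bit 0 ignored), address MSB 0x1A and address
 * LSB 0x2B; see set_dma_addr() below.
 */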

#define MAX_DMA_CHANNELS	8

#ifdef CONFIG_X86_32

/* The maximum address that we can perform a DMA transfer to on this platform */
#define MAX_DMA_ADDRESS      (PAGE_OFFSET + 0x1000000)

#else

/* 16MB ISA DMA zone */
#define MAX_DMA_PFN   ((16 * 1024 * 1024) >> PAGE_SHIFT)

/* 4GB broken PCI/AGP hardware bus master zone */
#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)

/* Compat define for old dma zone */
#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))

#endif

/* 8237 DMA controllers */
#define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */

/* DMA controller registers */
#define DMA1_CMD_REG		0x08	/* command register (w) */
#define DMA1_STAT_REG		0x08	/* status register (r) */
#define DMA1_REQ_REG		0x09    /* request register (w) */
#define DMA1_MASK_REG		0x0A	/* single-channel mask (w) */
#define DMA1_MODE_REG		0x0B	/* mode register (w) */
#define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG		0x0D    /* Temporary Register (r) */
#define DMA1_RESET_REG		0x0D	/* Master Clear (w) */
#define DMA1_CLR_MASK_REG       0x0E    /* Clear Mask */
#define DMA1_MASK_ALL_REG       0x0F    /* all-channels mask (w) */

#define DMA2_CMD_REG		0xD0	/* command register (w) */
#define DMA2_STAT_REG		0xD0	/* status register (r) */
#define DMA2_REQ_REG		0xD2    /* request register (w) */
#define DMA2_MASK_REG		0xD4	/* single-channel mask (w) */
#define DMA2_MODE_REG		0xD6	/* mode register (w) */
#define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG		0xDA    /* Temporary Register (r) */
#define DMA2_RESET_REG		0xDA	/* Master Clear (w) */
#define DMA2_CLR_MASK_REG       0xDC    /* Clear Mask */
#define DMA2_MASK_ALL_REG       0xDE    /* all-channels mask (w) */

#define DMA_ADDR_0		0x00    /* DMA address registers */
#define DMA_ADDR_1		0x02
#define DMA_ADDR_2		0x04
#define DMA_ADDR_3		0x06
#define DMA_ADDR_4		0xC0
#define DMA_ADDR_5		0xC4
#define DMA_ADDR_6		0xC8
#define DMA_ADDR_7		0xCC

#define DMA_CNT_0		0x01    /* DMA count registers */
#define DMA_CNT_1		0x03
#define DMA_CNT_2		0x05
#define DMA_CNT_3		0x07
#define DMA_CNT_4		0xC2
#define DMA_CNT_5		0xC6
#define DMA_CNT_6		0xCA
#define DMA_CNT_7		0xCE

#define DMA_PAGE_0		0x87    /* DMA page registers */
#define DMA_PAGE_1		0x83
#define DMA_PAGE_2		0x81
#define DMA_PAGE_3		0x82
#define DMA_PAGE_5		0x8B
#define DMA_PAGE_6		0x89
#define DMA_PAGE_7		0x8A

/* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_READ		0x44
/* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_WRITE		0x48
/* pass thru DREQ->HRQ, DACK<-HLDA only */
#define DMA_MODE_CASCADE	0xC0

#define DMA_AUTOINIT		0x10
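
/*
 * A mode byte is normally one of the DMA_MODE_* values above, optionally
 * ORed with DMA_AUTOINIT for auto-reinitializing transfers (e.g.
 * DMA_MODE_READ | DMA_AUTOINIT == 0x54).  set_dma_mode() below ORs in the
 * channel number, so callers pass only the mode bits.
 */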


#ifdef CONFIG_ISA_DMA_API
extern spinlock_t  dma_spin_lock;

static inline unsigned long claim_dma_lock(void)
{
	unsigned long flags;
	spin_lock_irqsave(&dma_spin_lock, flags);
	return flags;
}

static inline void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&dma_spin_lock, flags);
}
#endif /* CONFIG_ISA_DMA_API */

/* enable/disable a specific DMA channel */
static inline void enable_dma(unsigned int dmanr)
{
	if (dmanr <= 3)
		dma_outb(dmanr, DMA1_MASK_REG);
	else
		dma_outb(dmanr & 3, DMA2_MASK_REG);
}

static inline void disable_dma(unsigned int dmanr)
{
	if (dmanr <= 3)
		dma_outb(dmanr | 4, DMA1_MASK_REG);
	else
		dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
}

/* Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should ---
 * --- only be used while holding the DMA lock ! ---
 */
static inline void clear_dma_ff(unsigned int dmanr)
{
	if (dmanr <= 3)
		dma_outb(0, DMA1_CLEAR_FF_REG);
	else
		dma_outb(0, DMA2_CLEAR_FF_REG);
}

/* set mode (above) for a specific DMA channel */
static inline void set_dma_mode(unsigned int dmanr, char mode)
{
	if (dmanr <= 3)
		dma_outb(mode | dmanr, DMA1_MODE_REG);
	else
		dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
}

/* Set only the page register bits of the transfer address.
 * This is used for successive transfers when we know the contents of
 * the lower 16 bits of the DMA current address register, but a 64k boundary
 * may have been crossed.
 */
static inline void set_dma_page(unsigned int dmanr, char pagenr)
{
	switch (dmanr) {
	case 0:
		dma_outb(pagenr, DMA_PAGE_0);
		break;
	case 1:
		dma_outb(pagenr, DMA_PAGE_1);
		break;
	case 2:
		dma_outb(pagenr, DMA_PAGE_2);
		break;
	case 3:
		dma_outb(pagenr, DMA_PAGE_3);
		break;
	case 5:
		dma_outb(pagenr & 0xfe, DMA_PAGE_5);
		break;
	case 6:
		dma_outb(pagenr & 0xfe, DMA_PAGE_6);
		break;
	case 7:
		dma_outb(pagenr & 0xfe, DMA_PAGE_7);
		break;
	}
}


/* Set transfer address & page bits for specific DMA channel.
 * Assumes dma flipflop is clear.
 */
static inline void set_dma_addr(unsigned int dmanr, unsigned int a)
{
	set_dma_page(dmanr, a>>16);
	if (dmanr <= 3)  {
		dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
		dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
	}  else  {
		dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
		dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
	}
}


/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for
 * a specific DMA channel.
 * You must ensure the parameters are valid.
 * NOTE: from a manual: "the number of transfers is one more
 * than the initial word count"! This is taken into account.
 * Assumes dma flip-flop is clear.
 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
 */
static inline void set_dma_count(unsigned int dmanr, unsigned int count)
{
	count--;
	if (dmanr <= 3)  {
		dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
		dma_outb((count >> 8) & 0xff,
			 ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
	} else {
		dma_outb((count >> 1) & 0xff,
			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
		dma_outb((count >> 9) & 0xff,
			 ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
	}
}
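
/*
 * Example (illustrative only): a 4096-byte transfer on word channel 5 is
 * 2048 words, so the count registers end up holding 2047 (0x07FF); a
 * 65536-byte transfer on byte channel 2 is programmed as 0xFFFF.
 */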


/* Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * If called before the channel has been used, it may return 1.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 *
 * Assumes DMA flip-flop is clear.
 */
static inline int get_dma_residue(unsigned int dmanr)
{
	unsigned int io_port;
	/* using short to get 16-bit wrap around */
	unsigned short count;

	io_port = (dmanr <= 3) ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
		: ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;

	count = 1 + dma_inb(io_port);
	count += dma_inb(io_port) << 8;

	return (dmanr <= 3) ? count : (count << 1);
}


/* These are in kernel/dma.c because x86 uses CONFIG_GENERIC_ISA_DMA */
#ifdef CONFIG_ISA_DMA_API
extern int request_dma(unsigned int dmanr, const char *device_id);
extern void free_dma(unsigned int dmanr);
#endif
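
/*
 * A typical programming sequence for one ISA DMA transfer, as a rough
 * sketch only (chan, buf_phys, len, err, flags and residue are
 * hypothetical driver variables; buf_phys must be a physical address
 * within the ISA DMA limit noted above):
 *
 *	err = request_dma(chan, "mydriver");
 *	if (err)
 *		return err;
 *
 *	flags = claim_dma_lock();
 *	disable_dma(chan);
 *	clear_dma_ff(chan);
 *	set_dma_mode(chan, DMA_MODE_READ);	[device to memory]
 *	set_dma_addr(chan, buf_phys);
 *	set_dma_count(chan, len);		[len in bytes]
 *	enable_dma(chan);
 *	release_dma_lock(flags);
 *
 *	... start the device and wait for completion, then ...
 *
 *	flags = claim_dma_lock();
 *	disable_dma(chan);
 *	residue = get_dma_residue(chan);	[should be 0]
 *	release_dma_lock(flags);
 *	free_dma(chan);
 */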

/* From PCI */

#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy	(0)
#endif

#endif /* _ASM_X86_DMA_H */