#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H
/*
 * Convention:
 *    read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 *
 * Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for
 * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice
 * these have the same semantics as the __raw variants, and as such, all
 * new code should be using the __raw versions.
 *
 * All ISA I/O routines are wrapped through the machine vector. If a
 * board does not provide overrides, a generic set that are copied in
 * from the default machine vector are used instead. These are largely
 * for old compat code for I/O offsetting to SuperIOs, all of which are
 * better handled through the machvec ioport mapping routines these days.
 */
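/*
 * A minimal usage sketch of the conventions above (the register
 * addresses and offsets are purely illustrative):
 *
 *	void __iomem *regs = ioremap(0xfe600000, 0x100);
 *
 *	u32 status = readl(regs + 0x04);	// barriered MMIO read
 *	writel(0x1, regs + 0x08);		// barriered MMIO write
 *	u8 rev = __raw_readb(regs);		// raw access, no barrier
 *
 *	u8 lcr = inb(0x3f8 + 3);		// ISA port I/O via the machvec
 *	outb(lcr | 0x80, 0x3f8 + 3);
 *
 *	iounmap(regs);
 */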
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__
/*
 * Depending on which platform we are running on, we need different
 * I/O functions.
 */
#define __IO_PREFIX	generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>

#ifdef CONFIG_HAS_IOPORT

#define inb(p)			sh_mv.mv_inb((p))
#define inw(p)			sh_mv.mv_inw((p))
#define inl(p)			sh_mv.mv_inl((p))
#define outb(x,p)		sh_mv.mv_outb((x),(p))
#define outw(x,p)		sh_mv.mv_outw((x),(p))
#define outl(x,p)		sh_mv.mv_outl((x),(p))

#define inb_p(p)		sh_mv.mv_inb_p((p))
#define inw_p(p)		sh_mv.mv_inw_p((p))
#define inl_p(p)		sh_mv.mv_inl_p((p))
#define outb_p(x,p)		sh_mv.mv_outb_p((x),(p))
#define outw_p(x,p)		sh_mv.mv_outw_p((x),(p))
#define outl_p(x,p)		sh_mv.mv_outl_p((x),(p))

#define insb(p,b,c)		sh_mv.mv_insb((p), (b), (c))
#define insw(p,b,c)		sh_mv.mv_insw((p), (b), (c))
#define insl(p,b,c)		sh_mv.mv_insl((p), (b), (c))
#define outsb(p,b,c)		sh_mv.mv_outsb((p), (b), (c))
#define outsw(p,b,c)		sh_mv.mv_outsw((p), (b), (c))
#define outsl(p,b,c)		sh_mv.mv_outsl((p), (b), (c))
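/*
 * These dispatch straight through the active machine vector, so
 * inb(0x3f8) expands to sh_mv.mv_inb(0x3f8). A board that needs to
 * remap legacy port I/O supplies its own mv_* hooks in its
 * struct sh_machine_vector; a rough sketch (MYBOARD_ISA_BASE is
 * hypothetical):
 *
 *	static u8 myboard_inb(unsigned long port)
 *	{
 *		return __raw_readb(MYBOARD_ISA_BASE + port);
 *	}
 */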

#endif

#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)	(__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)		(__chk_io_ptr(a), *(volatile u64 __force *)(a))

#define readb(a)		({ u8  r_ = __raw_readb(a); mb(); r_; })
#define readw(a)		({ u16 r_ = __raw_readw(a); mb(); r_; })
#define readl(a)		({ u32 r_ = __raw_readl(a); mb(); r_; })
#define readq(a)		({ u64 r_ = __raw_readq(a); mb(); r_; })

#define writeb(v,a)		({ __raw_writeb((v),(a)); mb(); })
#define writew(v,a)		({ __raw_writew((v),(a)); mb(); })
#define writel(v,a)		({ __raw_writel((v),(a)); mb(); })
#define writeq(v,a)		({ __raw_writeq((v),(a)); mb(); })

/*
 * Legacy SuperH on-chip I/O functions
 *
 * These are all deprecated, all new (and especially cross-platform) code
 * should be using the __raw_xxx() routines directly.
 */
static inline u8 __deprecated ctrl_inb(unsigned long addr)
{
	return __raw_readb(addr);
}

static inline u16 __deprecated ctrl_inw(unsigned long addr)
{
	return __raw_readw(addr);
}

static inline u32 __deprecated ctrl_inl(unsigned long addr)
{
	return __raw_readl(addr);
}

static inline u64 __deprecated ctrl_inq(unsigned long addr)
{
	return __raw_readq(addr);
}

static inline void __deprecated ctrl_outb(u8 v, unsigned long addr)
{
	__raw_writeb(v, addr);
}

static inline void __deprecated ctrl_outw(u16 v, unsigned long addr)
{
	__raw_writew(v, addr);
}

static inline void __deprecated ctrl_outl(u32 v, unsigned long addr)
{
	__raw_writel(v, addr);
}

static inline void __deprecated ctrl_outq(u64 v, unsigned long addr)
{
	__raw_writeq(v, addr);
}
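
/*
 * Converting old users is mechanical; with 'reg' standing in for any
 * on-chip register address:
 *
 *	old:	v = ctrl_inw(reg);	ctrl_outw(v, reg);
 *	new:	v = __raw_readw(reg);	__raw_writew(v, reg);
 */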

extern unsigned long generic_io_base;

static inline void ctrl_delay(void)
{
	__raw_readw(generic_io_base);
}

#define __BUILD_UNCACHED_IO(bwlq, type)					\
static inline type read##bwlq##_uncached(unsigned long addr)		\
{									\
	type ret;							\
	jump_to_uncached();						\
	ret = __raw_read##bwlq(addr);					\
	back_to_cached();						\
	return ret;							\
}									\
									\
static inline void write##bwlq##_uncached(type v, unsigned long addr)	\
{									\
	jump_to_uncached();						\
	__raw_write##bwlq(v, addr);					\
	back_to_cached();						\
}

__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)
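
/*
 * For reference, __BUILD_UNCACHED_IO(l, u32) above generates
 * readl_uncached()/writel_uncached(), which bounce through the
 * uncached mapping for the duration of the access:
 *
 *	u32 v = readl_uncached(addr);	// jump_to_uncached();
 *					// v = __raw_readl(addr);
 *					// back_to_cached();
 */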

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void __raw_writes##bwlq(volatile void __iomem *mem,	\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void __raw_reads##bwlq(volatile void __iomem *mem,	\
			       void *addr, unsigned int count)		\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(b, u8)
__BUILD_MEMORY_STRING(w, u16)

#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(l, u32)
#endif

__BUILD_MEMORY_STRING(q, u64)
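
/*
 * The string accessors hit the same register repeatedly, which is the
 * usual pattern for filling or draining a device FIFO. An illustrative
 * sketch (fifo is any __iomem cookie):
 *
 *	u16 buf[64];
 *	__raw_readsw(fifo, buf, ARRAY_SIZE(buf));	// drain 64 words
 *	__raw_writesw(fifo, buf, ARRAY_SIZE(buf));	// refill them
 */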

#define writesb			__raw_writesb
#define writesw			__raw_writesw
#define writesl			__raw_writesl

#define readsb			__raw_readsb
#define readsw			__raw_readsw
#define readsl			__raw_readsl

#define readb_relaxed(a)	readb(a)
#define readw_relaxed(a)	readw(a)
#define readl_relaxed(a)	readl(a)
#define readq_relaxed(a)	readq(a)

#ifndef CONFIG_GENERIC_IOMAP
/* Simple MMIO */
#define ioread8(a)		__raw_readb(a)
#define ioread16(a)		__raw_readw(a)
#define ioread16be(a)		be16_to_cpu(__raw_readw((a)))
#define ioread32(a)		__raw_readl(a)
#define ioread32be(a)		be32_to_cpu(__raw_readl((a)))

#define iowrite8(v,a)		__raw_writeb((v),(a))
#define iowrite16(v,a)		__raw_writew((v),(a))
#define iowrite16be(v,a)	__raw_writew(cpu_to_be16((v)),(a))
#define iowrite32(v,a)		__raw_writel((v),(a))
#define iowrite32be(v,a)	__raw_writel(cpu_to_be32((v)),(a))

#define ioread8_rep(a, d, c)	__raw_readsb((a), (d), (c))
#define ioread16_rep(a, d, c)	__raw_readsw((a), (d), (c))
#define ioread32_rep(a, d, c)	__raw_readsl((a), (d), (c))

#define iowrite8_rep(a, s, c)	__raw_writesb((a), (s), (c))
#define iowrite16_rep(a, s, c)	__raw_writesw((a), (s), (c))
#define iowrite32_rep(a, s, c)	__raw_writesl((a), (s), (c))
#endif

#define mmio_insb(p,d,c)	__raw_readsb(p,d,c)
#define mmio_insw(p,d,c)	__raw_readsw(p,d,c)
#define mmio_insl(p,d,c)	__raw_readsl(p,d,c)

#define mmio_outsb(p,s,c)	__raw_writesb(p,s,c)
#define mmio_outsw(p,s,c)	__raw_writesw(p,s,c)
#define mmio_outsl(p,s,c)	__raw_writesl(p,s,c)
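
/*
 * Illustrative only: drivers written against the iomap API can use
 * these directly on an ioremap()/pci_iomap() cookie, e.g.
 *
 *	u32 v = ioread32(regs + REG_CTRL);
 *	iowrite32(v | CTRL_ENABLE, regs + REG_CTRL);
 *	iowrite16be(0x1234, regs + REG_DATA);	// big-endian register
 *
 * (REG_CTRL, CTRL_ENABLE and REG_DATA are hypothetical names.)
 */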

/* synco on SH-4A, otherwise a nop */
#define mmiowb()		wmb()

#define IO_SPACE_LIMIT 0xffffffff

#ifdef CONFIG_HAS_IOPORT

/*
 * This function provides a method for the generic case where a
 * board-specific ioport_map simply needs to return the port + some
 * arbitrary port base.
 *
 * We use this at board setup time to implicitly set the port base, and
 * as a result, we can use the generic ioport_map.
 */
static inline void __set_io_port_base(unsigned long pbase)
{
	generic_io_base = pbase;
}
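
/*
 * Boards normally call this once from their setup code, e.g. (the
 * base address is purely illustrative):
 *
 *	__set_io_port_base(0xb8000000);
 *
 * after which the generic ioport_map() just returns
 * port + generic_io_base.
 */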

#define __ioport_map(p, n) sh_mv.mv_ioport_map((p), (n))

#endif

/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);

/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncachable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
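
/*
 * From a driver's point of view the usual idiom is unchanged (the
 * resource here is illustrative):
 *
 *	void __iomem *base = ioremap(res->start, resource_size(res));
 *	if (!base)
 *		return -ENOMEM;
 *	...
 *	iounmap(base);
 *
 * On 29-bit parts the P1/P2 cases fall out of __ioremap_29bit()
 * below without touching page tables at all.
 */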
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
			       pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);

static inline void __iomem *
__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}

static inline void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses are done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);
#endif

	return NULL;
}

static inline void __iomem *
__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	void __iomem *ret;

	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

	ret = __ioremap_29bit(offset, size, prot);
	if (ret)
		return ret;

	return __ioremap(offset, size, prot);
}
#else
#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
#define __iounmap(addr)				do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

static inline void __iomem *
ioremap_cache(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL);
}

#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
	return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif

#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
	BUG();
	return NULL;
}

static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif

#define ioremap_nocache	ioremap
#define iounmap		__iounmap

#define maybebadio(port) \
	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
	       __func__, __LINE__, (port), (u32)__builtin_return_address(0))

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(unsigned long addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */