#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H
/*
 * Convention:
 *    read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 *
 * Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for
 * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice
 * these have the same semantics as the __raw variants, and as such, all
 * new code should be using the __raw versions.
 *
 * All ISA I/O routines are wrapped through the machine vector. If a
 * board does not provide overrides, a generic set that is copied in
 * from the default machine vector is used instead. These are largely
 * for old compat code for I/O offsetting to SuperIOs, all of which are
 * better handled through the machvec ioport mapping routines these days.
 */
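
/*
 * Quick illustration of the conventions above; the MMIO address and the
 * ISA port number are made up for the example, not real hardware:
 *
 *	void __iomem *regs = ioremap(0xa4000000, 0x100);
 *
 *	u32 v = readl(regs);		(MMIO read, implies mb())
 *	__raw_writel(v, regs);		(same access, no barrier)
 *	outb(0xff, 0x3f8);		(ISA port I/O via the machine vector)
 */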
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__
/*
 * Depending on which platform we are running on, we need different
 * I/O functions.
 */
#define __IO_PREFIX	generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>

#define inb(p)			sh_mv.mv_inb((p))
#define inw(p)			sh_mv.mv_inw((p))
#define inl(p)			sh_mv.mv_inl((p))
#define outb(x,p)		sh_mv.mv_outb((x),(p))
#define outw(x,p)		sh_mv.mv_outw((x),(p))
#define outl(x,p)		sh_mv.mv_outl((x),(p))

#define inb_p(p)		sh_mv.mv_inb_p((p))
#define inw_p(p)		sh_mv.mv_inw_p((p))
#define inl_p(p)		sh_mv.mv_inl_p((p))
#define outb_p(x,p)		sh_mv.mv_outb_p((x),(p))
#define outw_p(x,p)		sh_mv.mv_outw_p((x),(p))
#define outl_p(x,p)		sh_mv.mv_outl_p((x),(p))

#define insb(p,b,c)		sh_mv.mv_insb((p), (b), (c))
#define insw(p,b,c)		sh_mv.mv_insw((p), (b), (c))
#define insl(p,b,c)		sh_mv.mv_insl((p), (b), (c))
#define outsb(p,b,c)		sh_mv.mv_outsb((p), (b), (c))
#define outsw(p,b,c)		sh_mv.mv_outsw((p), (b), (c))
#define outsl(p,b,c)		sh_mv.mv_outsl((p), (b), (c))

#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)	(__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)		(__chk_io_ptr(a), *(volatile u64 __force *)(a))

#define readb(a)		({ u8  r_ = __raw_readb(a); mb(); r_; })
#define readw(a)		({ u16 r_ = __raw_readw(a); mb(); r_; })
#define readl(a)		({ u32 r_ = __raw_readl(a); mb(); r_; })
#define readq(a)		({ u64 r_ = __raw_readq(a); mb(); r_; })

#define writeb(v,a)		({ __raw_writeb((v),(a)); mb(); })
#define writew(v,a)		({ __raw_writew((v),(a)); mb(); })
#define writel(v,a)		({ __raw_writel((v),(a)); mb(); })
#define writeq(v,a)		({ __raw_writeq((v),(a)); mb(); })

/*
 * Legacy SuperH on-chip I/O functions
 *
 * These are all deprecated, all new (and especially cross-platform) code
 * should be using the __raw_xxx() routines directly.
 */
static inline u8 __deprecated ctrl_inb(unsigned long addr)
{
	return __raw_readb(addr);
}

static inline u16 __deprecated ctrl_inw(unsigned long addr)
{
	return __raw_readw(addr);
}

static inline u32 __deprecated ctrl_inl(unsigned long addr)
{
	return __raw_readl(addr);
}

static inline u64 __deprecated ctrl_inq(unsigned long addr)
{
	return __raw_readq(addr);
}

static inline void __deprecated ctrl_outb(u8 v, unsigned long addr)
{
	__raw_writeb(v, addr);
}

static inline void __deprecated ctrl_outw(u16 v, unsigned long addr)
{
	__raw_writew(v, addr);
}

static inline void __deprecated ctrl_outl(u32 v, unsigned long addr)
{
	__raw_writel(v, addr);
}

static inline void __deprecated ctrl_outq(u64 v, unsigned long addr)
{
	__raw_writeq(v, addr);
}
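
/*
 * Conversion sketch for the deprecated helpers above; the register
 * address is made up for the example:
 *
 *	ctrl_outw(0x0000, 0xfffffe80);		(old, deprecated)
 *	__raw_writew(0x0000, 0xfffffe80);	(preferred replacement)
 */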

extern unsigned long generic_io_base;

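/*
 * Dummy read back from the ISA port base; the generic machvec helpers
 * use this as a cheap settling delay for the _p ("pausing") variants.
 */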
static inline void ctrl_delay(void)
{
	__raw_readw(generic_io_base);
}

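/*
 * read{b,w,l,q}_uncached()/write{b,w,l,q}_uncached(): perform a single
 * raw access while executing from the uncached mapping, for the rare
 * cases (cache or MMU manipulation, typically) where the CPU must not
 * be running cached around the access.
 */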
#define __BUILD_UNCACHED_IO(bwlq, type)					\
static inline type read##bwlq##_uncached(unsigned long addr)		\
{									\
	type ret;							\
	jump_to_uncached();						\
	ret = __raw_read##bwlq(addr);					\
	back_to_cached();						\
	return ret;							\
}									\
									\
static inline void write##bwlq##_uncached(type v, unsigned long addr)	\
{									\
	jump_to_uncached();						\
	__raw_write##bwlq(v, addr);					\
	back_to_cached();						\
}

__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)

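/*
 * __raw_reads{b,w,l,q}()/__raw_writes{b,w,l,q}(): string (repeated) MMIO
 * accessors that move 'count' elements between a fixed device register
 * and a buffer in memory, one element at a time.
 */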
#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void __raw_writes##bwlq(volatile void __iomem *mem,	\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void __raw_reads##bwlq(volatile void __iomem *mem,	\
			       void *addr, unsigned int count)		\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(b, u8)
__BUILD_MEMORY_STRING(w, u16)

#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(l, u32)
#endif

__BUILD_MEMORY_STRING(q, u64)

#define writesb			__raw_writesb
#define writesw			__raw_writesw
#define writesl			__raw_writesl

#define readsb			__raw_readsb
#define readsw			__raw_readsw
#define readsl			__raw_readsl

#define readb_relaxed(a)	readb(a)
#define readw_relaxed(a)	readw(a)
#define readl_relaxed(a)	readl(a)
#define readq_relaxed(a)	readq(a)

#ifndef CONFIG_GENERIC_IOMAP
/* Simple MMIO */
#define ioread8(a)		__raw_readb(a)
#define ioread16(a)		__raw_readw(a)
#define ioread16be(a)		be16_to_cpu(__raw_readw((a)))
#define ioread32(a)		__raw_readl(a)
#define ioread32be(a)		be32_to_cpu(__raw_readl((a)))

#define iowrite8(v,a)		__raw_writeb((v),(a))
#define iowrite16(v,a)		__raw_writew((v),(a))
#define iowrite16be(v,a)	__raw_writew(cpu_to_be16((v)),(a))
#define iowrite32(v,a)		__raw_writel((v),(a))
#define iowrite32be(v,a)	__raw_writel(cpu_to_be32((v)),(a))

#define ioread8_rep(a, d, c)	__raw_readsb((a), (d), (c))
#define ioread16_rep(a, d, c)	__raw_readsw((a), (d), (c))
#define ioread32_rep(a, d, c)	__raw_readsl((a), (d), (c))

#define iowrite8_rep(a, s, c)	__raw_writesb((a), (s), (c))
#define iowrite16_rep(a, s, c)	__raw_writesw((a), (s), (c))
#define iowrite32_rep(a, s, c)	__raw_writesl((a), (s), (c))
#endif

#define mmio_insb(p,d,c)	__raw_readsb(p,d,c)
#define mmio_insw(p,d,c)	__raw_readsw(p,d,c)
#define mmio_insl(p,d,c)	__raw_readsl(p,d,c)

#define mmio_outsb(p,s,c)	__raw_writesb(p,s,c)
#define mmio_outsw(p,s,c)	__raw_writesw(p,s,c)
#define mmio_outsl(p,s,c)	__raw_writesl(p,s,c)

/* synco on SH-4A, otherwise a nop */
#define mmiowb()		wmb()

#define IO_SPACE_LIMIT 0xffffffff

/*
 * This function provides a method for the generic case where a
 * board-specific ioport_map simply needs to return the port + some
 * arbitrary port base.
 *
 * We use this at board setup time to implicitly set the port base, and
 * as a result, we can use the generic ioport_map.
 */
static inline void __set_io_port_base(unsigned long pbase)
{
	generic_io_base = pbase;
}
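
/*
 * Hypothetical board usage, from a machvec or board setup routine:
 *
 *	__set_io_port_base(0xb8000000);
 *
 * after which the generic ioport_map() resolves a port to
 * 0xb8000000 + port.
 */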

#define __ioport_map(p, n) sh_mv.mv_ioport_map((p), (n))

/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);

/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncachable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
			       pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);

static inline void __iomem *
__ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
{
	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}

static inline void __iomem *
__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
	unsigned long last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached accesses for P1 addresses are done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 addresses above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);
#endif

	return NULL;
}

static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
{
	void __iomem *ret;

	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

	ret = __ioremap_29bit(offset, size, prot);
	if (ret)
		return ret;

	return __ioremap(offset, size, prot);
}
#else
#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
#define __iounmap(addr)				do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *
ioremap(unsigned long offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

static inline void __iomem *
ioremap_cache(unsigned long offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL);
}
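
/*
 * Driver-side sketch; the physical base and size are made up for the
 * example:
 *
 *	void __iomem *base = ioremap(0xfe200000, 0x1000);
 *	if (base) {
 *		u32 id = readl(base);
 *		...
 *		iounmap(base);
 *	}
 */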

#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags)
{
	return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif

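/*
 * ioremap_fixed() maps an I/O range into one of a small set of fixed
 * virtual slots; without CONFIG_IOREMAP_FIXED the stubs below BUG() or
 * fail outright.
 */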
#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(resource_size_t, unsigned long,
				   unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
	      unsigned long size, pgprot_t prot)
{
	BUG();
	return NULL;
}

static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif

#define ioremap_nocache	ioremap
#define iounmap		__iounmap

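/*
 * Report a PC-style port access that the board cannot handle: logs the
 * calling function and line, the offending port, and the return address.
 */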
#define maybebadio(port) \
	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
	       __func__, __LINE__, (port), (u32)__builtin_return_address(0))

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(unsigned long addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */