#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H
/*
 * Convention:
 *    read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 *
 * Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for
 * SuperH specific I/O (raw I/O to on-chip CPU peripherals). In practice
 * these have the same semantics as the __raw variants, and as such, all
 * new code should be using the __raw versions.
 *
 * All ISA I/O routines are wrapped through the machine vector. If a
 * board does not provide overrides, a generic set that are copied in
 * from the default machine vector are used instead. These are largely
 * for old compat code for I/O offsetting to SuperIOs, all of which are
 * better handled through the machvec ioport mapping routines these days.
 */
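
/*
 * Illustrative sketch, not part of the original header: how the families
 * above are typically used from a driver.  The physical address, register
 * offsets and the 0x240 port number are made-up values for the example.
 *
 *	void __iomem *regs = ioremap(0xfe200000, 0x100);	// MMIO window
 *	u32 status = readl(regs + 0x10);		// barriered MMIO read
 *	writel(0x1, regs + 0x14);			// barriered MMIO write
 *
 *	u8 id = inb(0x240);		// legacy ISA/SuperIO port, via machvec
 *	outb(0x80, 0x240);
 */
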
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__
/*
 * Depending on which platform we are running on, we need different
 * I/O functions.
 */
#define __IO_PREFIX	generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>

#define inb(p)			sh_mv.mv_inb((p))
#define inw(p)			sh_mv.mv_inw((p))
#define inl(p)			sh_mv.mv_inl((p))
#define outb(x,p)		sh_mv.mv_outb((x),(p))
#define outw(x,p)		sh_mv.mv_outw((x),(p))
#define outl(x,p)		sh_mv.mv_outl((x),(p))

#define inb_p(p)		sh_mv.mv_inb_p((p))
#define inw_p(p)		sh_mv.mv_inw_p((p))
#define inl_p(p)		sh_mv.mv_inl_p((p))
#define outb_p(x,p)		sh_mv.mv_outb_p((x),(p))
#define outw_p(x,p)		sh_mv.mv_outw_p((x),(p))
#define outl_p(x,p)		sh_mv.mv_outl_p((x),(p))

#define insb(p,b,c)		sh_mv.mv_insb((p), (b), (c))
#define insw(p,b,c)		sh_mv.mv_insw((p), (b), (c))
#define insl(p,b,c)		sh_mv.mv_insl((p), (b), (c))
#define outsb(p,b,c)		sh_mv.mv_outsb((p), (b), (c))
#define outsw(p,b,c)		sh_mv.mv_outsw((p), (b), (c))
#define outsl(p,b,c)		sh_mv.mv_outsl((p), (b), (c))

#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)	(__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)		(__chk_io_ptr(a), *(volatile u64 __force *)(a))

#define readb(a)		({ u8  r_ = __raw_readb(a); mb(); r_; })
#define readw(a)		({ u16 r_ = __raw_readw(a); mb(); r_; })
#define readl(a)		({ u32 r_ = __raw_readl(a); mb(); r_; })
#define readq(a)		({ u64 r_ = __raw_readq(a); mb(); r_; })

#define writeb(v,a)		({ __raw_writeb((v),(a)); mb(); })
#define writew(v,a)		({ __raw_writew((v),(a)); mb(); })
#define writel(v,a)		({ __raw_writel((v),(a)); mb(); })
#define writeq(v,a)		({ __raw_writeq((v),(a)); mb(); })
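
/*
 * Illustrative sketch (an assumption, not from the original file): because
 * read{b,w,l,q}/write{b,w,l,q} each imply a full mb(), a driver pushing many
 * words into a device FIFO can use the __raw variants and pay for a single
 * barrier at the end.  'fifo' and the word count are hypothetical.
 *
 *	static void fifo_fill(void __iomem *fifo, const u32 *buf, int n)
 *	{
 *		while (n--)
 *			__raw_writel(*buf++, fifo);	// no implicit barrier
 *		wmb();			// order the burst before what follows
 *	}
 */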

/* SuperH on-chip I/O functions */
#define ctrl_inb		__raw_readb
#define ctrl_inw		__raw_readw
#define ctrl_inl		__raw_readl
#define ctrl_inq		__raw_readq

#define ctrl_outb		__raw_writeb
#define ctrl_outw		__raw_writew
#define ctrl_outl		__raw_writel
#define ctrl_outq		__raw_writeq

extern unsigned long generic_io_base;

static inline void ctrl_delay(void)
{
	__raw_readw(generic_io_base);
}
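
/*
 * Migration note with a hypothetical example (not from the original file):
 * the ctrl_* names are kept only for old SuperH code; new code should use
 * the __raw accessors directly, e.g. for some on-chip register address:
 *
 *	u16 v = ctrl_inw(addr);		// legacy spelling
 *	u16 v = __raw_readw(addr);	// preferred, same semantics
 *
 * ctrl_delay() above simply issues a dummy __raw_readw() of generic_io_base
 * as a bus-level delay.
 */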

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void __raw_writes##bwlq(volatile void __iomem *mem,	\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void __raw_reads##bwlq(volatile void __iomem *mem,	\
			       void *addr, unsigned int count)		\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(b, u8)
__BUILD_MEMORY_STRING(w, u16)

#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(l, u32)
#endif

__BUILD_MEMORY_STRING(q, u64)
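
/*
 * Illustrative sketch (an assumption): the __raw_reads/__raw_writes helpers
 * generated above transfer 'count' elements between a memory buffer and a
 * single device register, the usual pattern for draining or filling a FIFO.
 * 'rxfifo' and the length are hypothetical.
 *
 *	u16 buf[64];
 *	__raw_readsw(rxfifo, buf, 64);	// read 64 halfwords from one register
 */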

#define writesb			__raw_writesb
#define writesw			__raw_writesw
#define writesl			__raw_writesl

#define readsb			__raw_readsb
#define readsw			__raw_readsw
#define readsl			__raw_readsl

#define readb_relaxed(a)	readb(a)
#define readw_relaxed(a)	readw(a)
#define readl_relaxed(a)	readl(a)
#define readq_relaxed(a)	readq(a)

#ifndef CONFIG_GENERIC_IOMAP
/* Simple MMIO */
#define ioread8(a)		__raw_readb(a)
#define ioread16(a)		__raw_readw(a)
#define ioread16be(a)		be16_to_cpu(__raw_readw((a)))
#define ioread32(a)		__raw_readl(a)
#define ioread32be(a)		be32_to_cpu(__raw_readl((a)))

#define iowrite8(v,a)		__raw_writeb((v),(a))
#define iowrite16(v,a)		__raw_writew((v),(a))
#define iowrite16be(v,a)	__raw_writew(cpu_to_be16((v)),(a))
#define iowrite32(v,a)		__raw_writel((v),(a))
#define iowrite32be(v,a)	__raw_writel(cpu_to_be32((v)),(a))

#define ioread8_rep(a, d, c)	__raw_readsb((a), (d), (c))
#define ioread16_rep(a, d, c)	__raw_readsw((a), (d), (c))
#define ioread32_rep(a, d, c)	__raw_readsl((a), (d), (c))

#define iowrite8_rep(a, s, c)	__raw_writesb((a), (s), (c))
#define iowrite16_rep(a, s, c)	__raw_writesw((a), (s), (c))
#define iowrite32_rep(a, s, c)	__raw_writesl((a), (s), (c))
#endif

#define mmio_insb(p,d,c)	__raw_readsb(p,d,c)
#define mmio_insw(p,d,c)	__raw_readsw(p,d,c)
#define mmio_insl(p,d,c)	__raw_readsl(p,d,c)

#define mmio_outsb(p,s,c)	__raw_writesb(p,s,c)
#define mmio_outsw(p,s,c)	__raw_writesw(p,s,c)
#define mmio_outsl(p,s,c)	__raw_writesl(p,s,c)
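
/*
 * Illustrative sketch (an assumption): the *be accessors above byteswap as
 * needed, so registers documented as big-endian read correctly regardless of
 * CPU endianness.  'regs' and the 0x20 offset are hypothetical.
 *
 *	u32 len = ioread32be(regs + 0x20);	// big-endian device register
 *	iowrite32be(len + 4, regs + 0x20);
 */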

/* synco on SH-4A, otherwise a nop */
#define mmiowb()		wmb()

#define IO_SPACE_LIMIT 0xffffffff

/*
 * This function provides a method for the generic case where a
 * board-specific ioport_map simply needs to return the port + some
 * arbitrary port base.
 *
 * We use this at board setup time to implicitly set the port base, and
 * as a result, we can use the generic ioport_map.
 */
static inline void __set_io_port_base(unsigned long pbase)
{
	generic_io_base = pbase;
}

#define __ioport_map(p, n) sh_mv.mv_ioport_map((p), (n))
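
/*
 * Illustrative sketch (an assumption, hypothetical board name and address):
 * a board setup routine can seed generic_io_base once and then rely on the
 * generic ioport_map() path, which typically adds generic_io_base to the
 * port number, for its SuperIO-style accesses.
 *
 *	static void __init myboard_setup(char **cmdline_p)
 *	{
 *		__set_io_port_base(0xb8000000);		// example bus address
 *	}
 *
 *	// afterwards inb(0x3f8) ends up at generic_io_base + 0x3f8
 */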

/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);
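
/*
 * Illustrative sketch (an assumption): these helpers move whole buffers
 * to/from I/O memory so callers do not open-code byte loops over __iomem
 * pointers.  'shmem', 'struct rx_desc' and the offsets are hypothetical.
 *
 *	struct rx_desc desc;
 *	memcpy_fromio(&desc, shmem + RX_DESC_OFF, sizeof(desc));
 *	memset_io(shmem + TX_BUF_OFF, 0, 256);
 */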

/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncacheable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
			       pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);

#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(resource_size_t, unsigned long,
				   unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(resource_size_t phys_addr, unsigned long offset,
	      unsigned long size, pgprot_t prot)
{
	BUG();
}

static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif

static inline void __iomem *
__ioremap(unsigned long offset, unsigned long size, pgprot_t prot)
{
	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}

static inline void __iomem *
__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
	unsigned long last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses are done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);
#endif

	return NULL;
}

static inline void __iomem *
__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
{
	void __iomem *ret;

	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

	ret = __ioremap_29bit(offset, size, prot);
	if (ret)
		return ret;

	return __ioremap(offset, size, prot);
}
#else
#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
#define __iounmap(addr)				do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *
ioremap(unsigned long offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

static inline void __iomem *
ioremap_cache(unsigned long offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL);
}

static inline void __iomem *
ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags)
{
	return __ioremap_mode(offset, size, __pgprot(flags));
}

#define ioremap_nocache	ioremap
#define iounmap		__iounmap
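
/*
 * Illustrative sketch (an assumption, hypothetical resource): typical driver
 * usage of the wrappers above.  ioremap() gives an uncached mapping (a P2
 * alias on 29-bit parts), ioremap_cache() a cached one.
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */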

#define maybebadio(port) \
	printk(KERN_ERR "bad PC-like io %s:%u for port 0x%lx at 0x%08x\n", \
	       __func__, __LINE__, (port), (u32)__builtin_return_address(0))

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(unsigned long addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */