#ifndef __SPARC64_IO_H
#define __SPARC64_IO_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/page.h>      /* IO address mapping routines need this */
#include <asm/asi.h>
#include <asm-generic/pci_iomap.h>

/* BIO layer definitions. */
extern unsigned long kern_base, kern_size;

/* __raw_{read,write}{b,w,l,q} use direct access.
 * They access memory as big-endian, bypassing the cache,
 * by using ASI_PHYS_BYPASS_EC_E.
 */
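/* Note: unlike the readX()/writeX() accessors further down, the __raw_
 * variants carry no "memory" clobber, so the compiler may reorder them
 * relative to ordinary memory accesses; callers that need ordering must
 * provide it themselves.
 */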
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 ret;

	__asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_raw_readb */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 ret;

	__asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_raw_readw */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 ret;

	__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_raw_readl */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	u64 ret;

	__asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_raw_readq */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}

#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, const volatile void __iomem *addr)
{
	__asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */"
			     : /* no outputs */
			     : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

#define __raw_writew __raw_writew
static inline void __raw_writew(u16 w, const volatile void __iomem *addr)
{
	__asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */"
			     : /* no outputs */
			     : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 l, const volatile void __iomem *addr)
{
	__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */"
			     : /* no outputs */
			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 q, const volatile void __iomem *addr)
{
	__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */"
			     : /* no outputs */
			     : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}

/* Memory functions, same as I/O accesses on Ultra.
 * They access memory as little-endian, bypassing the cache,
 * by using ASI_PHYS_BYPASS_EC_E_L.
 */
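/* These accessors also carry a "memory" clobber, so they act as a compiler
 * barrier with respect to surrounding code.  A typical MMIO sequence
 * (illustrative only; phys_base, size and REG_CTRL are placeholders)
 * looks like:
 *
 *	void __iomem *regs = ioremap(phys_base, size);
 *	u32 val = readl(regs + REG_CTRL);
 *	writel(val | 1, regs + REG_CTRL);
 *	iounmap(regs);
 */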
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
	u8 ret;

	__asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_readb */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
	return ret;
}

#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	u16 ret;

	__asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_readw */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");

	return ret;
}

#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	u32 ret;

	__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_readl */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");

	return ret;
}

#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	u64 ret;

	__asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_readq */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");

	return ret;
}

#define writeb writeb
static inline void writeb(u8 b, volatile void __iomem *addr)
{
	__asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */"
			     : /* no outputs */
			     : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}

#define writew writew
static inline void writew(u16 w, volatile void __iomem *addr)
{
	__asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */"
			     : /* no outputs */
			     : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}

#define writel writel
static inline void writel(u32 l, volatile void __iomem *addr)
{
	__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */"
			     : /* no outputs */
			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}

#define writeq writeq
static inline void writeq(u64 q, volatile void __iomem *addr)
{
	__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */"
			     : /* no outputs */
			     : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}


#define inb inb
static inline u8 inb(unsigned long addr)
{
	u8 ret;

	__asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_inb */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");

	return ret;
}

#define inw inw
static inline u16 inw(unsigned long addr)
{
	u16 ret;

	__asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_inw */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");

	return ret;
}

#define inl inl
static inline u32 inl(unsigned long addr)
{
	u32 ret;

	__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_inl */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");

	return ret;
}

#define outb outb
static inline void outb(u8 b, unsigned long addr)
{
	__asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_outb */"
			     : /* no outputs */
			     : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}

#define outw outw
static inline void outw(u16 w, unsigned long addr)
{
	__asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_outw */"
			     : /* no outputs */
			     : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}

#define outl outl
static inline void outl(u32 l, unsigned long addr)
{
	__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_outl */"
			     : /* no outputs */
			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}
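/* There are no separate port I/O instructions on sparc64; the "port"
 * numbers passed to inb()/outb() and friends above are just bus addresses,
 * so port accesses are ordinary little-endian loads and stores as well.
 */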


#define inb_p(__addr) 		inb(__addr)
#define outb_p(__b, __addr)	outb(__b, __addr)
#define inw_p(__addr)		inw(__addr)
#define outw_p(__w, __addr)	outw(__w, __addr)
#define inl_p(__addr)		inl(__addr)
#define outl_p(__l, __addr)	outl(__l, __addr)

void outsb(unsigned long, const void *, unsigned long);
void outsw(unsigned long, const void *, unsigned long);
void outsl(unsigned long, const void *, unsigned long);
void insb(unsigned long, void *, unsigned long);
void insw(unsigned long, void *, unsigned long);
void insl(unsigned long, void *, unsigned long);
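/* The string variants above are implemented out of line; the ioreadN_rep()
 * and iowriteN_rep() helpers below simply forward to them.
 */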

static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
{
	insb((unsigned long __force)port, buf, count);
}
static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
{
	insw((unsigned long __force)port, buf, count);
}

static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
{
	insl((unsigned long __force)port, buf, count);
}

static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
{
	outsb((unsigned long __force)port, buf, count);
}

static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
{
	outsw((unsigned long __force)port, buf, count);
}

static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
{
	outsl((unsigned long __force)port, buf, count);
}

#define readb_relaxed(__addr)	readb(__addr)
#define readw_relaxed(__addr)	readw(__addr)
#define readl_relaxed(__addr)	readl(__addr)
#define readq_relaxed(__addr)	readq(__addr)

/* Valid I/O space regions can be anywhere, because each supported PCI bus
 * can live in an arbitrary area of the physical address range.
 */
#define IO_SPACE_LIMIT 0xffffffffffffffffUL

/* Now the SBUS variants; the only difference from PCI is that we do
 * not use the little-endian ASIs.
 */
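/* SBUS devices are big-endian, so the big-endian bypass ASI hands back
 * data in the device's native byte order without any swapping.
 */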
static inline u8 sbus_readb(const volatile void __iomem *addr)
{
	u8 ret;

	__asm__ __volatile__("lduba\t[%1] %2, %0\t/* sbus_readb */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
			     : "memory");

	return ret;
}

static inline u16 sbus_readw(const volatile void __iomem *addr)
{
	u16 ret;

	__asm__ __volatile__("lduha\t[%1] %2, %0\t/* sbus_readw */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
			     : "memory");

	return ret;
}

static inline u32 sbus_readl(const volatile void __iomem *addr)
{
	u32 ret;

	__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* sbus_readl */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
			     : "memory");

	return ret;
}

static inline u64 sbus_readq(const volatile void __iomem *addr)
{
	u64 ret;

	__asm__ __volatile__("ldxa\t[%1] %2, %0\t/* sbus_readq */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
			     : "memory");

	return ret;
}

static inline void sbus_writeb(u8 b, volatile void __iomem *addr)
{
	__asm__ __volatile__("stba\t%r0, [%1] %2\t/* sbus_writeb */"
			     : /* no outputs */
			     : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
			     : "memory");
}

static inline void sbus_writew(u16 w, volatile void __iomem *addr)
{
	__asm__ __volatile__("stha\t%r0, [%1] %2\t/* sbus_writew */"
			     : /* no outputs */
			     : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
			     : "memory");
}

static inline void sbus_writel(u32 l, volatile void __iomem *addr)
{
	__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* sbus_writel */"
			     : /* no outputs */
			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
			     : "memory");
}

static inline void sbus_writeq(u64 l, volatile void __iomem *addr)
{
	__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* sbus_writeq */"
			     : /* no outputs */
			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)
			     : "memory");
}

static inline void sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
{
	while (n--) {
		sbus_writeb(c, dst);
		dst++;
	}
}

static inline void memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
{
	volatile void __iomem *d = dst;

	while (n--) {
		writeb(c, d);
		d++;
	}
}

static inline void sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
				      __kernel_size_t n)
{
	char *d = dst;

	while (n--) {
		char tmp = sbus_readb(src);
		*d++ = tmp;
		src++;
	}
}


static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
				 __kernel_size_t n)
{
	char *d = dst;

	while (n--) {
		char tmp = readb(src);
		*d++ = tmp;
		src++;
	}
}

static inline void sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
				    __kernel_size_t n)
{
	const char *s = src;
	volatile void __iomem *d = dst;

	while (n--) {
		char tmp = *s++;
		sbus_writeb(tmp, d);
		d++;
	}
}

static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
			       __kernel_size_t n)
{
	const char *s = src;
	volatile void __iomem *d = dst;

	while (n--) {
		char tmp = *s++;
		writeb(tmp, d);
		d++;
	}
}
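/* All of the memset/memcpy helpers above work one byte at a time through
 * the byte accessors, which keeps them safe for devices that cannot handle
 * wider accesses, at the cost of throughput.
 */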

#define mmiowb()

#ifdef __KERNEL__

/* On sparc64 the whole physical I/O address space is accessible using
 * physically addressed loads and stores, so ioremap() has nothing to set
 * up and simply returns the offset as the cookie.
 */
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	return (void __iomem *)offset;
}

#define ioremap_nocache(X,Y)		ioremap((X),(Y))
#define ioremap_wc(X,Y)			ioremap((X),(Y))

static inline void iounmap(volatile void __iomem *addr)
{
}
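/* Because ioremap() is just a cast and the accessors already go through
 * the cache-bypassing physical ASIs, the _nocache and _wc variants simply
 * alias it, and iounmap() has nothing to tear down.  Drivers should still
 * pair ioremap() with iounmap() for portability.
 */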

#define ioread8(X)			readb(X)
#define ioread16(X)			readw(X)
#define ioread16be(X)			__raw_readw(X)
#define ioread32(X)			readl(X)
#define ioread32be(X)			__raw_readl(X)
#define iowrite8(val,X)			writeb(val,X)
#define iowrite16(val,X)		writew(val,X)
#define iowrite16be(val,X)		__raw_writew(val,X)
#define iowrite32(val,X)		writel(val,X)
#define iowrite32be(val,X)		__raw_writel(val,X)
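/* The big-endian ioreadNbe()/iowriteNbe() variants map onto the __raw_
 * accessors because those already use the big-endian bypass ASI.
 */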

/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr);
void ioport_unmap(void __iomem *);

/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
void pci_iounmap(struct pci_dev *dev, void __iomem *);

static inline int sbus_can_dma_64bit(void)
{
	return 1;
}
static inline int sbus_can_burst64(void)
{
	return 1;
}
struct device;
void sbus_set_sbus64(struct device *, int);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif

#endif /* !(__SPARC64_IO_H) */