/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */
 
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

/*
 * This can be sized dynamically, but we will do this
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
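/*
 * For illustration, the arithmetic behind these constants (assuming
 * the usual sparc32 4 KB PAGE_SIZE and 4-byte iopte_t): a 256 MB
 * window / 4 KB pages = 65536 PTEs; 65536 * 4 B = 256 KB of table;
 * 256 KB / 4 KB = 64 pages, i.e. an allocation order of 6.
 */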

/* srmmu.c */
extern int viking_mxcc_present;
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
extern int flush_page_for_dma_global;
static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
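/*
 * MKIOPTE, worked through for illustration: the pfn is shifted left
 * by 8 into the IOPTE_PAGE field, the permission bits (here IOPERM:
 * cacheable, writable, valid) are or-ed in, and ~IOPTE_WAZ masks off
 * the bits the hardware wants written as zero.
 */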

static void __init sbus_iommu_init(struct of_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* Stupid alignment constraints give me a headache.
	   We need a 256K, 512K, 1M or 2M area aligned to
	   its size, and the current gfp will fortunately give
	   it to us. */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%08x]\n",
			    IOMMU_NPTES*sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
	iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;
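	/* For instance (illustrative numbers): a HyperSparc with a
	 * 256 KB virtually indexed cache and 4 KB pages gets
	 * 256 KB >> 12 = 64 colors.
	 */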

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}

static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct of_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/* This begs to be btfixup-ed by srmmu. */
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while(start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while(start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while(start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
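	/*
	 * bit_map_string_get() returns the index of npages contiguous
	 * free PTE slots; the third argument is the color hint, so
	 * (assuming the bitext allocator's contract) the chosen index
	 * matches the pfn modulo usemap.num_colors.
	 */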
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
{
	unsigned long off;
	int npages;
	struct page *page;
	u32 busa;

	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
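	/* The round-up above, on illustrative numbers: for a vaddr
	 * ending in 0xf00 and len 0x300, off = 0xf00 and
	 * (0xf00 + 0x300 + 0xfff) >> 12 = 2 pages.
	 */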
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(dev, page, npages);
	return busa + off;
}

static __u32 iommu_get_scsi_one_noflush(struct device *dev, char *vaddr, unsigned long len)
{
	return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
{
	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

	while(page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(dev, vaddr, len);
}

static void iommu_get_scsi_sgl_noflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	flush_page_for_dma(0);
	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while(sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages not to be in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dvma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dvma_length = (__u32) sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	unsigned long off;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
}

static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while(sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dvma_address & PAGE_MASK, n);
		sg->dvma_address = 0x21212121;
		sg = sg_next(sg);
	}
}

#ifdef CONFIG_SBUS
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
			      unsigned long addr, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long page, end;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}

static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}
#endif

static char *iommu_lockarea(char *vaddr, unsigned long len)
{
	return vaddr;
}

static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}

void __init ld_mmu_iommu(void)
{
	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
	BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);

	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
		/* IO coherent chip */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
	} else if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter which page it is */
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
	} else {
		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
#endif

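	/* Reasoning inferred from the flags rather than stated here:
	 * MXCC Vikings and HyperSparc can keep consistent DVMA
	 * mappings cacheable (SRMMU_CACHE / IOPTE_CACHE); every other
	 * SRMMU cpu gets non-cacheable ptes for them.
	 */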
	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}