/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
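/*
 * A GART PTE packs a 40-bit physical address: bits 12-31 are kept in
 * place and bits 32-39 are folded into PTE bits 4-11, next to the
 * valid and coherent flags above.
 */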
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state, set on each GART wrap */

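/*
 * Allocate a run of IOMMU pages, honouring the device's segment
 * boundary mask; on wrap-around the search restarts at the bottom
 * of the bitmap and a full GART flush is scheduled.
 */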
static unsigned long alloc_iommu(struct device *dev, int size)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, 0);
	if (offset == -1) {
		need_flush = 1;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size, 0);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude: dump some entries from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped, pre-reserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the pre-reserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

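/*
 * A mapping must go through the IOMMU when the region does not fit
 * below the device's DMA mask, or when use of the IOMMU is forced.
 */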
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;

	return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir)
{
	unsigned long npages = iommu_num_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(dev, npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

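/* Map a single area and make the mapping visible with a GART flush. */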
static dma_addr_t
gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, paddr, size, dir);

	flush_gart();

	return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	unsigned long bus;

	if (!dev)
		dev = &fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = gart_map_simple(dev, paddr, size, dir);

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

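/*
 * When no entry needs remapping, pass the single element straight
 * through; otherwise build one contiguous GART mapping for the run.
 */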
static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not-yet-processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

/* Allocate and map a coherent DMA buffer. */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag)
{
	void *vaddr;

	vaddr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
	if (!vaddr)
		return NULL;

	*dma_addr = gart_map_single(dev, __pa(vaddr), size, DMA_BIDIRECTIONAL);
	if (*dma_addr != bad_dma_address)
		return vaddr;

	free_pages((unsigned long)vaddr, get_order(size));

	return NULL;
}

static int no_agp;

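/*
 * Decide how much of the aperture the IOMMU may use: by default the
 * whole aperture, or half of it when AGP also needs the aperture.
 */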
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
				iommu_size >> 20);
	}

	return iommu_size;
}
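/* Read the aperture base and size straight from northbridge config space. */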
static __init unsigned long read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

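/* Point every K8 northbridge at the shared GATT and enable translation. */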
static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

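/* Restore aperture registers (if requested) and re-enable GART translation. */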
static int gart_resume(struct sys_device *dev)
{
	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

	if (fix_up_north_bridges) {
		int i;

		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

		for (i = 0; i < num_k8_northbridges; i++) {
			struct pci_dev *dev = k8_northbridges[i];

			/*
			 * Don't enable translations just yet.  That is the next
			 * step.  Restore the pre-suspend aperture settings.
			 */
			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
						aperture_order << 1);
			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
						aperture_alloc >> 25);
		}
	}

	enable_gart_translations();

	return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name = "gart",
	.suspend = gart_suspend,
	.resume = gart_resume,

};

static struct sys_device device_gart = {
	.id	= 0,
	.cls	= &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;
	unsigned long start_pfn, end_pfn;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	enable_gart_translations();

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- would corrupt data on next suspend");

	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	/* need to map that range */
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_WARNING "falling back to iommu=soft.\n");
	return -1;
}

extern int agp_amd64_init(void);

static struct dma_mapping_ops gart_dma_ops = {
	.map_single			= gart_map_single,
	.map_simple			= gart_map_simple,
	.unmap_single			= gart_unmap_single,
	.sync_single_for_cpu		= NULL,
	.sync_single_for_device		= NULL,
	.sync_single_range_for_cpu	= NULL,
	.sync_single_range_for_device	= NULL,
	.sync_sg_for_cpu		= NULL,
	.sync_sg_for_device		= NULL,
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.alloc_coherent			= gart_alloc_coherent,
};

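/* Turn off GART address translation in each northbridge at shutdown. */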
void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_size;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			printk(KERN_WARNING "More than 4GB of memory "
			       "but GART IOMMU not available.\n"
			       KERN_WARNING "falling back to iommu=soft.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * sizeof(void *));
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
				iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();
	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}
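/* Parse GART-specific boot options (leak, fullflush, noagp, memaper=, ...). */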
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=') ++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}