/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>
#include <asm/x86_init.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it set the GART is
 * flushed for every mapping. The problem is that doing the lazy flush
 * seems to trigger bugs with some popular PCI cards, in particular
 * 3ware (but it has also been seen with QLogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

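/*
 * GART PTE layout, as encoded by the macros below: bits 31:12 hold the
 * low part of the physical page address, bits 11:4 hold physical
 * address bits 39:32, and the two low bits are the valid and coherent
 * flags.
 */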
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state, set on each GART wraparound */

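/*
 * Allocate a run of 'size' pages in the remapping area, honoring the
 * device's segment boundary and an optional alignment mask; when the
 * search wraps around, a GART flush is forced.
 */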
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
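	/* Only whole pages were added to phys_mem, so its low bits still
	 * hold the original in-page offset that is re-added below. */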
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

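	/* Addresses outside the remapping window, including the reserved
	 * emergency pages at its start, were never remapped; ignore them. */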
	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, NULL);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first one. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
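	/* Terminate a shortened output list by zeroing the entry after
	 * the last mapped one. */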
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, NULL);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag)
{
	dma_addr_t paddr;
	unsigned long align_mask;
	struct page *page;

	if (force_iommu && !(flag & GFP_DMA)) {
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
		if (!page)
			return NULL;

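		/* Request a bus address aligned to the allocation order. */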
		align_mask = (1UL << get_order(size)) - 1;
		paddr = dma_map_area(dev, page_to_phys(page), size,
				     DMA_BIDIRECTIONAL, align_mask);

		flush_gart();
		if (paddr != bad_dma_address) {
			*dma_addr = paddr;
			return page_address(page);
		}
		__free_pages(page, get_order(size));
	} else
		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);

	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, get_order(size));
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

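	/* Default to the whole aperture; leave half for AGP when it is in use. */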
	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
				iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

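	/* The base register holds physical address bits 39:25. */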
	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static int gart_resume(struct sys_device *dev)
{
	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

	if (fix_up_north_bridges) {
		int i;

		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

		for (i = 0; i < num_k8_northbridges; i++) {
			struct pci_dev *dev = k8_northbridges[i];

			/*
			 * Don't enable translations just yet.  That is the next
			 * step.  Restore the pre-suspend aperture settings.
			 */
			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
						aperture_order << 1);
			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
						aperture_alloc >> 25);
		}
	}

	enable_gart_translations();

	return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name = "gart",
	.suspend = gart_suspend,
	.resume = gart_resume,

};

static struct sys_device device_gart = {
	.id	= 0,
	.cls	= &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- "
		      "would corrupt data on next suspend");

	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       "falling back to iommu=soft.\n");
	return -1;
}

static struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc_coherent			= gart_alloc_coherent,
	.free_coherent			= gart_free_coherent,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp)
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
Y
Yinghai Lu 已提交
735
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
736
	    !gart_iommu_aperture ||
L
Linus Torvalds 已提交
737
	    (no_agp && init_k8_gatt(&info) < 0)) {
Y
Yinghai Lu 已提交
738
		if (max_pfn > MAX_DMA32_PFN) {
739
			printk(KERN_WARNING "More than 4GB of memory "
740 741
			       "but GART IOMMU not available.\n");
			printk(KERN_WARNING "falling back to iommu=soft.\n");
J
Jon Mason 已提交
742
		}
743
		return 0;
L
Linus Torvalds 已提交
744 745
	}

	/* need to map that range */
	aper_size = info.aper_size << 20;
	aper_base = info.aper_base;
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

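	/* One bit per GART page, hence iommu_pages/8 bytes of bitmap. */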
	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		int ret;

		ret = dma_debug_resize_entries(iommu_pages);
		if (ret)
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot trace all the entries\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
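	/* The reserved emergency region at the aperture start is never
	 * mapped, so its base address doubles as the error cookie. */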
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);


	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
				iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it sure and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();
	
	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware.  Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}

void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}