/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32-bit addresses on
 * systems with more than 4GB.
 *
 * See Documentation/DMA-API-HOWTO.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

static dma_addr_t bad_dma_addr;

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. The problem is that the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
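
/*
 * Illustrative encode/decode example (values assumed, not taken from real
 * hardware): the 40-bit physical address 0x1234567000 encodes to the GART
 * PTE 0x34567123 -- bits 12-31 carry the low part of the page frame, bits
 * 4-11 carry physical address bits 32-39, and bits 0-1 are the
 * valid/coherent flags. GPTE_DECODE(0x34567123) recovers 0x1234567000.
 */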

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return bad_dma_addr;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_addr;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
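	/*
	 * Illustrative example (values assumed, not from this code): with
	 * iommu_bus_base at 0xe0000000, iommu_page 5 and a request that
	 * starts 0x100 bytes into its first page, the device is handed bus
	 * address 0xe0005100, while GATT entries 5..5+npages-1 point at the
	 * real physical pages.
	 */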
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_addr) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, 0);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first entry. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out		= 0;
	start		= 0;
	start_sg	= sg;
	sgmap		= sg;
	seg_size	= 0;
	max_seg_size	= dma_get_max_seg_size(dev);
	ps		= NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size	= 0;
				sgmap		= sg_next(sgmap);
				pages		= 0;
				start		= i;
				start_sg	= s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, 0);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_addr;
	return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, unsigned long attrs)
{
	void *vaddr;

	vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
	if (!vaddr ||
	    !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
		return vaddr;

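	/*
	 * The align_mask below makes the GART slot naturally aligned.
	 * Illustrative numbers (assuming 4KB pages): for a 16KB request
	 * get_order(size) is 2, so the mask is 3 and alloc_iommu() returns
	 * an index aligned to four GART pages.
	 */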
	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
			DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
	flush_gart();
	if (unlikely(*dma_addr == bad_dma_addr))
		goto out_free;
	return vaddr;
out_free:
	dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, unsigned long attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
	dma_direct_free(dev, size, vaddr, dma_addr, attrs);
}

static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == bad_dma_addr);
}

static int no_agp;

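/*
 * Illustrative sizing example (assumed values): with a 256MB aperture that
 * is shared with the AGP driver, half of it (128MB) is handed to the IOMMU;
 * when no AGP driver is present the whole aperture is used. A result below
 * 64MB triggers the warning in this function.
 */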
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
				iommu_size >> 20);
	}

	return iommu_size;
}
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
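	/*
	 * Illustrative decode (assumed register values): an order field of 2
	 * gives a 128MB aperture, and a base register value of 0x40 gives
	 * aper_base = 0x40 << 25 = 0x80000000 (2GB).
	 */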
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet.  That is the next
		 * step.  Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume		= gart_resume,

};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       "falling back to iommu=soft.\n");
	return -1;
}
static const struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc				= gart_alloc_coherent,
	.free				= gart_free_coherent,
	.mapping_error			= gart_mapping_error,
	.dma_supported			= dma_direct_supported,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shut it down if AGP is installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warning("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		int ret;

		ret = dma_debug_resize_entries(iommu_pages);
		if (ret)
			pr_debug("PCI-DMA: Cannot trace all the entries\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	bad_dma_addr		= iommu_bus_base;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
				iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();
	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware.  Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();
	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}
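/*
 * Example option strings handled below (illustrative; this parser is
 * typically reached from iommu= boot-parameter handling, and unrecognized
 * text is simply ignored): "fullflush", "nofullflush", "noagp",
 * "noaperture", "memaper=2", "leak", or a leading number that sets
 * iommu_size.
 */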
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
IOMMU_INIT_POST(gart_iommu_hole_init);