/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows using PCI devices that only support 32bit addresses on systems
 * with more than 4GB.
 *
 * See Documentation/DMA-API-HOWTO.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */


#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

static dma_addr_t bad_dma_addr;

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
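/*
 * The macros above pack a physical page address of up to 40 bits into a
 * 32-bit GATT entry: bits 31-12 keep physical bits 31-12, bits 11-4 hold
 * physical bits 39-32, and bits 1-0 are the coherent/valid flags.
 * Illustrative example: GPTE_ENCODE(0x1234567000ULL) yields 0x34567123,
 * and GPTE_DECODE(0x34567123) recovers 0x1234567000.
 */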

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR	(1ULL << 40)

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */

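/*
 * alloc_iommu() reserves a run of 'size' GART pages in the allocation
 * bitmap.  base_index and boundary_size are passed to iommu_area_alloc()
 * so that the returned range never crosses the device's DMA segment
 * boundary (dma_get_seg_boundary()).  It returns the first page index,
 * or -1 when the aperture is exhausted.
 */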
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		amd_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	static int dump;

	if (dump)
		return;
	dump = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space,
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !dma_capable(dev, addr, size);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page;
	int i;

	if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
		return bad_dma_addr;

	iommu_page = alloc_iommu(dev, npages, align_mask);
	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_addr;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	pr_debug("dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_addr) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, NULL);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out		= 0;
	start		= 0;
	start_sg	= sg;
	sgmap		= sg;
	seg_size	= 0;
	max_seg_size	= dma_get_max_seg_size(dev);
	ps		= NULL; /* shut up gcc */

	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;

				seg_size	= 0;
				sgmap		= sg_next(sgmap);
				pages		= 0;
				start		= i;
				start_sg	= s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, NULL);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_addr;
	return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag, struct dma_attrs *attrs)
{
	dma_addr_t paddr;
	unsigned long align_mask;
	struct page *page;

	if (force_iommu && !(flag & GFP_DMA)) {
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
		if (!page)
			return NULL;

		align_mask = (1UL << get_order(size)) - 1;
		paddr = dma_map_area(dev, page_to_phys(page), size,
				     DMA_BIDIRECTIONAL, align_mask);

		flush_gart();
		if (paddr != bad_dma_addr) {
			*dma_addr = paddr;
			return page_address(page);
		}
		__free_pages(page, get_order(size));
	} else
		return dma_generic_alloc_coherent(dev, size, dma_addr, flag,
						  attrs);

	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, get_order(size));
}

static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return (dma_addr == bad_dma_addr);
}

static int no_agp;

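/*
 * Decide how much of the aperture to use for remapping: the whole
 * aperture when no AGP driver is active, otherwise half of it (unless an
 * explicit size was given via gart_parse_options()), trimmed for
 * PMD_PAGE_SIZE alignment.  Warns when less than 64MB is left.
 */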
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
				iommu_size >> 20);
	}

	return iommu_size;
}

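/*
 * Read the GART aperture base and size back from the northbridge.  As
 * used below, AMD64_GARTAPERTURECTL bits 3-1 give the aperture order
 * (size = 32MB << order) and AMD64_GARTAPERTUREBASE holds the base in
 * 32MB (1 << 25) units; apertures that are empty or extend above 4GB
 * are rejected.
 */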
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

static void enable_gart_translations(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static void gart_fixup_northbridges(void)
{
	int i;

	if (!fix_up_north_bridges)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (i = 0; i < amd_nb_num(); i++) {
		struct pci_dev *dev = node_to_amd_nb(i)->misc;

		/*
		 * Don't enable translations just yet.  That is the next
		 * step.  Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(dev, aperture_order);
		pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE, aperture_alloc >> 25);
	}
}

static void gart_resume(void)
{
	pr_info("PCI-DMA: Resuming GART IOMMU\n");

	gart_fixup_northbridges();

	enable_gart_translations();
}

static struct syscore_ops gart_syscore_ops = {
	.resume		= gart_resume,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	pr_info("PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < amd_nb_num(); i++) {
		dev = node_to_amd_nb(i)->misc;
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	register_syscore_ops(&gart_syscore_ops);

	flush_gart();

	pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       "falling back to iommu=soft.\n");
	return -1;
}

static struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc				= gart_alloc_coherent,
	.free				= gart_free_coherent,
	.mapping_error			= gart_mapping_error,
};

static void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	/* don't shut it down if there is AGP installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (i = 0; i < amd_nb_num(); i++) {
		u32 ctl;

		dev = node_to_amd_nb(i)->misc;
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

int __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other AMD AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_amd_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
			pr_warning("falling back to iommu=soft.\n");
		}
		return 0;
	}

	/* need to map that range */
	aper_size	= info.aper_size << 20;
	aper_base	= info.aper_base;
	end_pfn		= (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);

	start_pfn = PFN_DOWN(aper_base);
	if (!pfn_range_is_mapped(start_pfn, end_pfn))
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

	pr_info("PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		int ret;

		ret = dma_debug_resize_entries(iommu_pages);
		if (ret)
			pr_debug("PCI-DMA: Cannot trace all the entries\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	agp_memory_reserved	= iommu_size;
	iommu_start		= aper_size - iommu_size;
	iommu_bus_base		= info.aper_base + iommu_start;
	bad_dma_addr		= iommu_bus_base;
	iommu_gatt_base		= agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
				iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware.  Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
	x86_platform.iommu_shutdown = gart_iommu_shutdown;
	swiotlb = 0;

	return 0;
}

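/*
 * Parse the GART-specific tokens of the iommu= boot parameter, e.g.
 * "iommu=fullflush" or "iommu=memaper=2" (illustrative examples).
 */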
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
IOMMU_INIT_POST(gart_iommu_hole_init);