/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32bit addresses on
 * systems with more than 4GB.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
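
/*
 * A GART PTE is a 32-bit entry: bits 31:12 hold bits 31:12 of the target
 * physical address, bits 11:4 hold physical address bits 39:32, bit 1
 * marks the page coherent and bit 0 marks it valid -- which is what
 * GPTE_ENCODE()/GPTE_DECODE() above pack and unpack.
 */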

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */

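/*
 * Allocate a run of IOMMU pages from the GART bitmap.  This is a next-fit
 * search starting at next_bit; whenever the allocation wraps back to the
 * start of the aperture, need_flush is set so the GART TLBs get flushed
 * before that space is reused.
 */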
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. dump some from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i],
				0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu ||
		!is_buffer_dma_capable(*dev->dma_mask, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
}

/* Map a single continuous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
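/*
 * If the device can already address the buffer (and force_iommu is not
 * set), the physical address is returned unchanged; otherwise the buffer
 * is remapped through the GART aperture.
 */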
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	unsigned long bus;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
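/*
 * Addresses outside the remapping window, or inside the reserved
 * EMERGENCY_PAGES range at its start, were never mapped by the GART and
 * are ignored here.
 */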
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries continuous into the first. */
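/*
 * The combined range is described by a single output entry (*sout): its
 * dma_address points at the newly allocated GART range and its dma_length
 * accumulates the lengths of all merged input entries.
 */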
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a continuous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

/* allocate and map a coherent mapping */
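/*
 * With force_iommu the pages are allocated normally and then remapped
 * through the GART using an align_mask of the allocation order, so the
 * returned DMA address is naturally aligned to the allocation size;
 * otherwise the generic coherent allocator is used.
 */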
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag)
{
	dma_addr_t paddr;
	unsigned long align_mask;
	struct page *page;

	if (force_iommu && !(flag & GFP_DMA)) {
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
		if (!page)
			return NULL;

		align_mask = (1UL << get_order(size)) - 1;
		paddr = dma_map_area(dev, page_to_phys(page), size,
				     DMA_BIDIRECTIONAL, align_mask);

		flush_gart();
		if (paddr != bad_dma_address) {
			*dma_addr = paddr;
			return page_address(page);
		}
		__free_pages(page, get_order(size));
	} else
		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);

	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr)
{
	gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)vaddr, get_order(size));
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
				iommu_size >> 20);
	}

	return iommu_size;
}

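/*
 * Read the GART aperture base and size from the northbridge: the base
 * register stores the address in 32MB units, the control register stores
 * the size as a power-of-two multiple of 32MB.
 */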
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static int gart_resume(struct sys_device *dev)
{
	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

	if (fix_up_north_bridges) {
		int i;

		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

		for (i = 0; i < num_k8_northbridges; i++) {
			struct pci_dev *dev = k8_northbridges[i];

			/*
			 * Don't enable translations just yet.  That is the next
			 * step.  Restore the pre-suspend aperture settings.
			 */
			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
						aperture_order << 1);
			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
						aperture_alloc >> 25);
		}
	}

	enable_gart_translations();

	return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name = "gart",
	.suspend = gart_suspend,
	.resume = gart_resume,

};

static struct sys_device device_gart = {
	.id	= 0,
	.cls	= &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	enable_gart_translations();

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- "
		      "would corrupt data on next suspend");

	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       "falling back to iommu=soft.\n");
	return -1;
}

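/*
 * The mapping operations installed as the global dma_ops at the end of
 * gart_iommu_init() below.
 */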
static struct dma_mapping_ops gart_dma_ops = {
	.map_single			= gart_map_single,
	.unmap_single			= gart_unmap_single,
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.alloc_coherent			= gart_alloc_coherent,
	.free_coherent			= gart_free_coherent,
};

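/*
 * Clear the GART enable bit in every northbridge on shutdown so no
 * further DMA is translated through the aperture.
 */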
void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD GART found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			printk(KERN_WARNING "More than 4GB of memory "
			       "but GART IOMMU not available.\n");
			printk(KERN_WARNING "falling back to iommu=soft.\n");
		}
		return;
	}

	/* need to map that range */
	aper_size = info.aper_size << 20;
	aper_base = info.aper_base;
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
				  get_order(iommu_pages*sizeof(void *)));
		if (!iommu_leak_tab)
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
				iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it sure and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}


864
void __init gart_parse_options(char *p)
865 866 867
{
	int arg;

L
Linus Torvalds 已提交
868
#ifdef CONFIG_IOMMU_LEAK
869
	if (!strncmp(p, "leak", 4)) {
870 871
		leak_trace = 1;
		p += 4;
872 873
		if (*p == '=')
			++p;
874 875 876
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
L
Linus Torvalds 已提交
877
#endif
878 879
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
880
	if (!strncmp(p, "fullflush", 8))
881
		iommu_fullflush = 1;
882
	if (!strncmp(p, "nofullflush", 11))
883
		iommu_fullflush = 0;
884
	if (!strncmp(p, "noagp", 5))
885
		no_agp = 1;
886
	if (!strncmp(p, "noaperture", 10))
887 888
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
889
	if (!strncmp(p, "force", 5))
890
		gart_iommu_aperture_allowed = 1;
891
	if (!strncmp(p, "allowed", 7))
892
		gart_iommu_aperture_allowed = 1;
893 894 895 896 897 898 899 900 901 902
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}