/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32bit addresses on systems
 * with more than 4GB.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it enabled the GART
 * is flushed for every mapping. The problem is that the lazy flush
 * seems to trigger bugs with some popular PCI cards, in particular
 * 3ware (but it has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
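/*
 * A GART PTE is 32 bits wide: bits 31:12 hold the low part of the
 * physical page address, bits 11:4 hold physical address bits 39:32,
 * and the low bits carry the valid/coherent flags. For example
 * (illustrative address): GPTE_ENCODE(0x123456000) yields 0x23456013,
 * and GPTE_DECODE(0x23456013) recovers 0x123456000.
 */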

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static bool need_flush;		/* global flush state. set for each gart wrap */

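/*
 * Allocate a run of 'size' page slots in the aperture bitmap. The
 * search starts at next_bit, just after the previous allocation, and
 * retries from bit 0 if that fails. Whenever the allocator wraps,
 * need_flush is set, because a reused slot may still have a stale
 * translation cached in the GART and must be flushed before use.
 */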
static unsigned long alloc_iommu(struct device *dev, int size,
				 unsigned long align_mask)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, align_mask);
	if (offset == -1) {
		need_flush = true;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size,
					  align_mask);
	}
	if (offset != -1) {
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = true;
		}
	}
	if (iommu_fullflush)
		need_flush = true;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	if (offset >= next_bit)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = false;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. dump some from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i],
				0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

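/*
 * A mapping through the GART is needed either when force_iommu is set
 * or when the buffer is not addressable within the device's DMA mask;
 * nonforced_iommu() checks the mask alone and ignores force_iommu.
 */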
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return force_iommu ||
		!is_buffer_dma_capable(*dev->dma_mask, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir, unsigned long align_mask)
{
	unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

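	/*
	 * Point one GATT entry at each physical page. The intra-page
	 * offset of phys_mem survives the PAGE_SIZE increments below,
	 * so the returned bus address keeps the caller's byte offset.
	 */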
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	unsigned long bus;
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	unsigned long iommu_page;
	int npages;
	int i;

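	/*
	 * Addresses below the emergency pages or beyond the remapping
	 * window were never mapped through the GART, so there is
	 * nothing to tear down for them.
	 */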
	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir, 0);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir, NULL);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

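	/*
	 * Lay the chunks out back to back in the aperture. Only the
	 * first chunk may start at a non-zero page offset; every later
	 * chunk must be page aligned (enforced by the BUG_ON below).
	 */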
	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into one contiguous mapping.
 */
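/*
 * Illustrative case: two 4KB chunks, the first ending exactly on a
 * page boundary and the second starting at offset 0, come out as one
 * 8KB DMA segment; a non-zero offset or an oversized segment forces
 * the current segment to be closed first.
 */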
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir, NULL);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
		    gfp_t flag)
{
	dma_addr_t paddr;
	unsigned long align_mask;
	struct page *page;

	if (force_iommu && !(flag & GFP_DMA)) {
		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
		if (!page)
			return NULL;

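		/*
		 * Request a naturally aligned aperture slot: an order-1
		 * (8KB, illustrative) allocation gets align_mask 0x1 and
		 * therefore starts on an even aperture page.
		 */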
		align_mask = (1UL << get_order(size)) - 1;
		paddr = dma_map_area(dev, page_to_phys(page), size,
				     DMA_BIDIRECTIONAL, align_mask);

		flush_gart();
		if (paddr != bad_dma_address) {
			*dma_addr = paddr;
			return page_address(page);
		}
		__free_pages(page, get_order(size));
	} else
		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);

	return NULL;
}

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_addr)
{
	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long)vaddr, get_order(size));
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
				iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

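	/*
	 * The northbridge encodes the aperture size as 32MB << order,
	 * with the order in bits 3:1 of the control register, and the
	 * base in 32MB units: register bits 14:0 are address bits 39:25.
	 */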
	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

static int gart_resume(struct sys_device *dev)
{
	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

	if (fix_up_north_bridges) {
		int i;

		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

		for (i = 0; i < num_k8_northbridges; i++) {
			struct pci_dev *dev = k8_northbridges[i];

			/*
			 * Don't enable translations just yet.  That is the next
			 * step.  Restore the pre-suspend aperture settings.
			 */
			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
						aperture_order << 1);
			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
						aperture_alloc >> 25);
		}
	}

	enable_gart_translations();

	return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name = "gart",
	.suspend = gart_suspend,
	.resume = gart_resume,

};

static struct sys_device device_gart = {
	.id	= 0,
	.cls	= &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	agp_gatt_table = gatt;

	enable_gart_translations();

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- "
		      "would corrupt data on next suspend");

	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_WARNING "falling back to iommu=soft.\n");
	return -1;
}

static struct dma_map_ops gart_dma_ops = {
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
	.map_page			= gart_map_page,
	.unmap_page			= gart_unmap_page,
	.alloc_coherent			= gart_alloc_coherent,
	.free_coherent			= gart_free_coherent,
};

void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

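	/*
	 * Clear the GARTEN enable bit on every northbridge so that no
	 * GART translations stay active once this kernel is done.
	 */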
	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
		return;

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			printk(KERN_WARNING "More than 4GB of memory "
			       "but GART IOMMU not available.\n");
			printk(KERN_WARNING "falling back to iommu=soft.\n");
		}
		return;
	}

	/* need to map that range */
	aper_size = info.aper_size << 20;
	aper_base = info.aper_base;
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
				  get_order(iommu_pages*sizeof(void *)));
		if (!iommu_leak_tab)
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
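	/*
	 * bad_dma_address points at the reserved emergency pages at the
	 * bottom of the remapping window; those are never handed out,
	 * so a stray DMA to this address cannot hit a live mapping.
	 */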
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
				iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it sure and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}

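/*
 * Parse the GART-specific suboptions of the iommu= boot parameter,
 * e.g. "iommu=noagp,fullflush" or "iommu=memaper=2" (illustrative
 * values).
 */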
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 8))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}