/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
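/*
 * Runtime flag: PAT defaults to enabled when CONFIG_X86_PAT is built in,
 * and is cleared via pat_disable() (e.g. by the "nopat" boot parameter or
 * when the CPU lacks PAT support).
 */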
int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#endif


static int debug_enable;

static int __init pat_debug_setup(char *str)
{
	debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)


static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

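/*
 * Build one entry of the IA32_CR_PAT MSR value: PAT entry x (0-7) lives in
 * byte x of the MSR and selects memory type y.
 */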
#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
	u64 pat;

	if (!pat_enabled)
		return;

	/* Paranoia check. */
	if (!cpu_has_pat && boot_pat_state) {
		/*
		 * If this happens we are on a secondary CPU, but
		 * switched to PAT on the boot CPU. We have no way to
		 * undo PAT.
		 */
		printk(KERN_ERR "PAT enabled, "
		       "but not supported by secondary CPU\n");
		BUG();
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
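	/*
	 * The upper four PAT entries (PAT bit set in the PTE) mirror the
	 * lower four, since Linux does not use the PTE PAT bit here.
	 */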
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}

/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
	u64			start;
	u64			end;
	unsigned long		type;
	struct list_head	nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the resulting effective memory type as PAT understands it.
 * (PAT and MTRR do not encode memory types with the same values.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type == MTRR_TYPE_UNCACHABLE)
			return _PAGE_CACHE_UC;
		if (mtrr_type == MTRR_TYPE_WRCOMB)
			return _PAGE_CACHE_WC;
	}

	return req_type;
}

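/*
 * Check that a new entry is compatible with 'entry' and with any later
 * entries that also overlap the new range. If the caller passed a non-NULL
 * 'type', a differing type from the first overlapping entry is adopted
 * instead of being treated as a conflict. Returns 0 on success or -EBUSY
 * on a genuine conflict.
 */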
static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
	if (new->type != entry->type) {
		if (type) {
			new->type = entry->type;
			*type = entry->type;
		} else
			goto conflict;
	}

	 /* check overlaps with more than one entry in the list */
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (new->end <= entry->start)
			break;
		else if (new->type != entry->type)
			goto conflict;
	}
	return 0;

 conflict:
	printk(KERN_INFO "%s:%d conflicting memory types "
	       "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
	       new->end, cattr_name(new->type), cattr_name(entry->type));
	return -EBUSY;
}

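/*
 * Remember the most recently used position in the (sorted) memtype list to
 * speed up the common case of consecutive reservations at increasing
 * addresses.
 */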
static struct memtype *cached_entry;
static u64 cached_start;

/*
 * For RAM pages, mark the pages as non WB memory type using
 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
 * set_memory_wc() on a RAM page at a time before marking it as WB again.
 * This is ok, because only one driver will be owning the page and
 * doing set_memory_*() calls.
 *
 * For now, we use PageNonWB to track that the RAM page is being mapped
 * as non WB. In future, we will have to use one more flag
 * (or some other mechanism in page_struct) to distinguish between
 * UC and WC mapping.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn, end_pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		if (page_mapped(page) || PageNonWB(page))
			goto out;

		SetPageNonWB(page);
	}
	return 0;

out:
	end_pfn = pfn;
	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
		page = pfn_to_page(pfn);
		ClearPageNonWB(page);
	}

	return -EINVAL;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn, end_pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		if (page_mapped(page) || !PageNonWB(page))
			goto out;

		ClearPageNonWB(page);
	}
	return 0;

out:
	end_pfn = pfn;
	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
		page = pfn_to_page(pfn);
		SetPageNonWB(page);
	}
	return -EINVAL;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants to
 * inherit the memory type from mtrr (if WB), existing PAT, defaulting to
 * UC_MINUS.
 *
 * If new_type is NULL, the function will return an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function will return
 * the available type in *new_type when there is no error. On any error it
 * will return a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new, *entry;
	unsigned long actual_type;
	struct list_head *where;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == -1)
				*new_type = _PAGE_CACHE_WB;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (is_ISA_range(start, end - 1)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	if (req_type == -1) {
		/*
		 * Call mtrr_lookup to get the type hint. This is an
		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
		 * tools and ACPI tools). Use WB request for WB memory and use
		 * UC_MINUS otherwise.
		 */
		u8 mtrr_type = mtrr_type_lookup(start, end);

		if (mtrr_type == MTRR_TYPE_WRBACK)
			actual_type = _PAGE_CACHE_WB;
		else
			actual_type = _PAGE_CACHE_UC_MINUS;
	} else {
		actual_type = pat_x_mtrr_type(start, end,
					      req_type & _PAGE_CACHE_MASK);
	}

	/*
	 * For legacy reasons, some parts of the physical address range in the
	 * legacy 1MB region are treated as non-RAM (even when listed as RAM in
	 * the e820 tables).  So we will track the memory attributes of this
	 * legacy 1MB region using the linear memtype_list always.
	 */
	if (end >= ISA_END_ADDRESS) {
		is_range_ram = pagerange_is_ram(start, end);
		if (is_range_ram == 1)
			return reserve_ram_pages_type(start, end, req_type,
						      new_type);
		else if (is_range_ram < 0)
			return -EINVAL;
	}

	new  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	if (new_type)
		*new_type = actual_type;

	spin_lock(&memtype_lock);

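	/*
	 * Start the search at the cached position if the new range begins at
	 * or after the previously cached start; otherwise scan from the list
	 * head.
	 */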
	if (cached_entry && start >= cached_start)
		entry = cached_entry;
	else
		entry = list_entry(&memtype_list, struct memtype, nd);

	/* Search for existing mapping that overlaps the current range */
	where = NULL;
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (end <= entry->start) {
			where = entry->nd.prev;
			cached_entry = list_entry(where, struct memtype, nd);
			break;
		} else if (start <= entry->start) { /* end > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				where = entry->nd.prev;
				cached_entry = list_entry(where,
							struct memtype, nd);
			}
			break;
		} else if (start < entry->end) { /* start > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				cached_entry = list_entry(entry->nd.prev,
							struct memtype, nd);

				/*
				 * Move to right position in the linked
				 * list to add this new entry
				 */
				list_for_each_entry_continue(entry,
							&memtype_list, nd) {
					if (start <= entry->start) {
						where = entry->nd.prev;
						break;
					}
				}
			}
			break;
		}
	}

	if (err) {
		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
		       "track %s, req %s\n",
		       start, end, cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	cached_start = start;

	if (where)
		list_add(&new->nd, where);
	else
		list_add_tail(&new->nd, &memtype_list);

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
		start, end, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

int free_memtype(u64 start, u64 end)
{
	struct memtype *entry;
	int err = -EINVAL;
	int is_range_ram;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (is_ISA_range(start, end - 1))
		return 0;

	/*
	 * For legacy reasons, some parts of the physical address range in the
	 * legacy 1MB region are treated as non-RAM (even when listed as RAM in
	 * the e820 tables).  So we will track the memory attributes of this
	 * legacy 1MB region using the linear memtype_list always.
	 */
	if (end >= ISA_END_ADDRESS) {
		is_range_ram = pagerange_is_ram(start, end);
		if (is_range_ram == 1)
			return free_ram_pages_type(start, end);
		else if (is_range_ram < 0)
			return -EINVAL;
	}

	spin_lock(&memtype_lock);
	list_for_each_entry(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			if (cached_entry == entry || cached_start == start)
				cached_entry = NULL;

			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		}
	}
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
	}

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}


pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM*/
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

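/*
 * Called when /dev/mem is mmap'ed: choose a cache attribute for the range,
 * reserve it in the memtype list and, if the range is part of the kernel
 * identity mapping, change that mapping's attribute as well. Returns 0 if
 * the mapping must be refused, 1 on success with *vma_prot updated.
 */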
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	u64 offset = ((u64) pfn) << PAGE_SHIFT;
	unsigned long flags = -1;
	int retval;

	if (!range_is_allowed(pfn, size))
		return 0;

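	/* O_SYNC on /dev/mem asks for an uncached (UC-) mapping */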
	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC_MINUS;
	}

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	/*
	 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
	 *
	 * Without O_SYNC, we want to get
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	if (flags != -1) {
		retval = reserve_memtype(offset, offset + size, flags, NULL);
	} else {
		retval = reserve_memtype(offset, offset + size, -1, &flags);
	}

	if (retval < 0)
		return 0;

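	/*
	 * If the range is already covered by the kernel identity mapping,
	 * change the attribute there too so both mappings stay consistent.
	 */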
	if (((pfn < max_low_pfn_mapped) ||
	     (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
		free_memtype(offset, offset + size);
		printk(KERN_INFO
		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			offset, (unsigned long long)(offset + size));
		return 0;
	}

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}

void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
	u64 addr = (u64)pfn << PAGE_SHIFT;
	unsigned long flags;

	reserve_memtype(addr, addr + size, want_flags, &flags);
	if (flags != want_flags) {
		printk(KERN_INFO
		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
			current->comm, current->pid,
			cattr_name(want_flags),
			addr, (unsigned long long)(addr + size),
			cattr_name(flags));
	}
}

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;

	free_memtype(addr, addr + size);
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only. After a successful reserve_memtype, this
 * function also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
{
	int is_ram = 0;
	int id_sz, ret;
	unsigned long flags;
	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);

	is_ram = pagerange_is_ram(paddr, paddr + size);

	if (is_ram != 0) {
		/*
		 * For mapping RAM pages, drivers need to call
		 * set_memory_[uc|wc|wb] directly, for reserve and free, before
		 * setting up the PTE.
		 */
		WARN_ON_ONCE(1);
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		free_memtype(paddr, paddr + size);
		printk(KERN_ERR
		"%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n",
			current->comm, current->pid,
			cattr_name(want_flags),
			(unsigned long long)paddr,
			(unsigned long long)(paddr + size),
			cattr_name(flags));
		return -EINVAL;
	}

	/* Need to keep identity mapping in sync */
	if (paddr >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < paddr + size) ?
				__pa(high_memory) - paddr :
				size;

	if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
		free_memtype(paddr, paddr + size);
		printk(KERN_ERR
			"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			(unsigned long long)paddr,
			(unsigned long long)(paddr + size));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call. Otherwise, we reserve the entire vma range by going
 * through the PTEs page by page to get the physical address and protection.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	int retval = 0;
	unsigned long i, j;
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;

	if (!pat_enabled)
		return 0;

	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
	}

	/* reserve entire vma page by page, using pfn and prot from pte */
	for (i = 0; i < vma_size; i += PAGE_SIZE) {
		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
			continue;

		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
		if (retval)
			goto cleanup_ret;
	}
	return 0;

cleanup_ret:
	/* Reserve error: Cleanup partial reservation and return error */
	for (j = 0; j < i; j += PAGE_SIZE) {
		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
			continue;

		free_pfn_range(paddr, PAGE_SIZE);
	}

	return retval;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range with a
 * single reserve_pfn_range call.
 * Otherwise, we look at the pfn and size and reserve only the specified range
 * page by page.
 *
 * Note that this function can be called with caller trying to map only a
 * subrange/page inside the vma.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
			unsigned long pfn, unsigned long size)
{
	int retval = 0;
	unsigned long i, j;
	resource_size_t base_paddr;
	resource_size_t paddr;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;

	if (!pat_enabled)
		return 0;

	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot);
	}

	/* reserve page by page using pfn and size */
	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
	for (i = 0; i < size; i += PAGE_SIZE) {
		paddr = base_paddr + i;
		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
		if (retval)
			goto cleanup_ret;
	}
	return 0;

cleanup_ret:
	/* Reserve error: Cleanup partial reservation and return error */
	for (j = 0; j < i; j += PAGE_SIZE) {
		paddr = base_paddr + j;
		free_pfn_range(paddr, PAGE_SIZE);
	}

	return retval;
}

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
	unsigned long i;
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;

	if (!pat_enabled)
		return;

	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		free_pfn_range(paddr, vma_size);
		return;
	}

	if (size != 0 && size != vma_size) {
		/* free page by page, using pfn and size */
		paddr = (resource_size_t)pfn << PAGE_SHIFT;
		for (i = 0; i < size; i += PAGE_SIZE) {
			free_pfn_range(paddr + i, PAGE_SIZE);
		}
	} else {
		/* free entire vma, page by page, using the pfn from pte */
		for (i = 0; i < vma_size; i += PAGE_SIZE) {
836
			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
837 838 839 840 841 842 843
				continue;

			free_pfn_range(paddr, PAGE_SIZE);
		}
	}
}

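/*
 * Return a write-combining protection value when PAT is available;
 * fall back to an uncached mapping otherwise.
 */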
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}

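/*
 * Debug interface: dump the current memtype list through debugfs
 * (typically /sys/kernel/debug/x86/pat_memtype_list).
 */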
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *list_node, *print_entry;
	int i = 1;

	print_entry  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	list_for_each_entry(list_node, &memtype_list, nd) {
		if (pos == i) {
			*print_entry = *list_node;
			spin_unlock(&memtype_lock);
			return print_entry;
		}
		++i;
	}
	spin_unlock(&memtype_lock);
	kfree(print_entry);

	return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
				NULL, &memtype_fops);
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */