/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#endif


static int debug_enable;

static int __init pat_debug_setup(char *str)
{
	debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)


static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
	u64 pat;

	if (!pat_enabled)
		return;

	/* Paranoia check. */
	if (!cpu_has_pat && boot_pat_state) {
		/*
		 * If this happens we are on a secondary CPU, but
		 * switched to PAT on the boot CPU. We have no way to
		 * undo PAT.
		 */
		printk(KERN_ERR "PAT enabled, "
		       "but not supported by secondary CPU\n");
		BUG();
	}

	/* Set PWT to Write-Combining. All other PAT entries keep their defaults. */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
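	/*
	 * For reference, the value built above works out to 0x0007010600070106
	 * (entries 4-7 mirror entries 0-3); it is reported as "new" by the
	 * boot-time printk below.
	 */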

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}

/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
	u64			start;
	u64			end;
	unsigned long		type;
	struct list_head	nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */

/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Type in pat and mtrr will not have the same value.)
 * The intersection is based on "Effective Memory Type" tables in IA-32
 * SDM vol 3a
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type == MTRR_TYPE_UNCACHABLE)
			return _PAGE_CACHE_UC;
		if (mtrr_type == MTRR_TYPE_WRCOMB)
			return _PAGE_CACHE_WC;
	}

	return req_type;
}

static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
	if (new->type != entry->type) {
		if (type) {
			new->type = entry->type;
			*type = entry->type;
		} else
			goto conflict;
	}

	 /* check overlaps with more than one entry in the list */
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (new->end <= entry->start)
			break;
		else if (new->type != entry->type)
			goto conflict;
	}
	return 0;

 conflict:
	printk(KERN_INFO "%s:%d conflicting memory types "
	       "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
	       new->end, cattr_name(new->type), cattr_name(entry->type));
	return -EBUSY;
}

static struct memtype *cached_entry;
static u64 cached_start;

static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		/*
		 * For legacy reasons, physical address ranges in the legacy ISA
		 * region are tracked as non-RAM. This allows users of
		 * /dev/mem to map portions of the legacy ISA region, even when
		 * some of those portions are listed (or not even listed) with
		 * different e820 types (RAM/reserved/...).
		 */
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}

/*
 * For RAM pages, mark the pages as non-WB memory type using
 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
 * set_memory_wc() on a RAM page at a time before marking it as WB again.
 * This is OK, because only one driver owns the page and
 * makes the set_memory_*() calls.
 *
 * For now, we use PageNonWB to track that the RAM page is being mapped
 * as non-WB. In the future, we will have to use one more flag
 * (or some other mechanism in struct page) to distinguish between
 * UC and WC mappings.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn, end_pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		if (page_mapped(page) || PageNonWB(page))
			goto out;

		SetPageNonWB(page);
	}
	return 0;

out:
	end_pfn = pfn;
	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
		page = pfn_to_page(pfn);
		ClearPageNonWB(page);
	}

	return -EINVAL;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn, end_pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		if (page_mapped(page) || !PageNonWB(page))
			goto out;

		ClearPageNonWB(page);
	}
	return 0;

out:
	end_pfn = pfn;
	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
		page = pfn_to_page(pfn);
		SetPageNonWB(page);
	}
	return -EINVAL;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants to
 * inherit the memory type from mtrr (if WB), existing PAT, defaulting to
 * UC_MINUS.
 *
 * If new_type is NULL, the function returns an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, the function returns the
 * available type in new_type in case of no error. In case of any error
 * it returns a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new, *entry;
	unsigned long actual_type;
	struct list_head *where;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == -1)
				*new_type = _PAGE_CACHE_WB;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (is_ISA_range(start, end - 1)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	if (req_type == -1) {
		/*
		 * Call mtrr_lookup to get the type hint. This is an
		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
		 * tools and ACPI tools). Use WB request for WB memory and use
		 * UC_MINUS otherwise.
		 */
		u8 mtrr_type = mtrr_type_lookup(start, end);

		if (mtrr_type == MTRR_TYPE_WRBACK)
			actual_type = _PAGE_CACHE_WB;
		else
			actual_type = _PAGE_CACHE_UC_MINUS;
	} else {
		actual_type = pat_x_mtrr_type(start, end,
					      req_type & _PAGE_CACHE_MASK);
	}

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1)
		return reserve_ram_pages_type(start, end, req_type,
					      new_type);
	else if (is_range_ram < 0)
		return -EINVAL;

	new  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	if (cached_entry && start >= cached_start)
		entry = cached_entry;
	else
		entry = list_entry(&memtype_list, struct memtype, nd);

	/* Search for existing mapping that overlaps the current range */
	where = NULL;
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (end <= entry->start) {
			where = entry->nd.prev;
			cached_entry = list_entry(where, struct memtype, nd);
			break;
		} else if (start <= entry->start) { /* end > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				where = entry->nd.prev;
				cached_entry = list_entry(where,
							struct memtype, nd);
			}
			break;
		} else if (start < entry->end) { /* start > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				cached_entry = list_entry(entry->nd.prev,
							struct memtype, nd);

				/*
				 * Move to right position in the linked
				 * list to add this new entry
				 */
				list_for_each_entry_continue(entry,
							&memtype_list, nd) {
					if (start <= entry->start) {
						where = entry->nd.prev;
						break;
					}
				}
			}
			break;
		}
	}

	if (err) {
		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
		       "track %s, req %s\n",
		       start, end, cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	cached_start = start;

	if (where)
		list_add(&new->nd, where);
	else
		list_add_tail(&new->nd, &memtype_list);

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
		start, end, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}
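
/*
 * Typical usage sketch (hypothetical caller; phys_mem_access_prot_allowed()
 * below is a real in-tree user):
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = reserve_memtype(paddr, paddr + size, _PAGE_CACHE_UC_MINUS, &flags);
 *	if (ret)
 *		return ret;
 *	... map the range using the cache attribute returned in 'flags' ...
 *	free_memtype(paddr, paddr + size);
 */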

int free_memtype(u64 start, u64 end)
{
	struct memtype *entry;
	int err = -EINVAL;
	int is_range_ram;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (is_ISA_range(start, end - 1))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1)
		return free_ram_pages_type(start, end);
	else if (is_range_ram < 0)
		return -EINVAL;

	spin_lock(&memtype_lock);
	list_for_each_entry(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			if (cached_entry == entry || cached_start == start)
				cached_entry = NULL;

			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		}
	}
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
	}

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}


pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM*/
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	u64 offset = ((u64) pfn) << PAGE_SHIFT;
	unsigned long flags = -1;
	int retval;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC_MINUS;
	}

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	/*
	 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
	 *
	 * Without O_SYNC, we want to get
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	if (flags != -1) {
		retval = reserve_memtype(offset, offset + size, flags, NULL);
	} else {
		retval = reserve_memtype(offset, offset + size, -1, &flags);
	}

	if (retval < 0)
		return 0;

	if (((pfn < max_low_pfn_mapped) ||
	     (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
		free_memtype(offset, offset + size);
		printk(KERN_INFO
		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			offset, (unsigned long long)(offset + size));
		return 0;
	}

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}

void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
	u64 addr = (u64)pfn << PAGE_SHIFT;
	unsigned long flags;

	reserve_memtype(addr, addr + size, want_flags, &flags);
	if (flags != want_flags) {
		printk(KERN_INFO
		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
			current->comm, current->pid,
			cattr_name(want_flags),
			addr, (unsigned long long)(addr + size),
			cattr_name(flags));
	}
}

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;

	free_memtype(addr, addr + size);
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only. After a successful reserve_memtype, this
 * function also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int id_sz, ret;
	unsigned long flags;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() doesn't support RAM pages.
	 */
	if (is_ram != 0)
		return -EINVAL;

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	/* Need to keep identity mapping in sync */
	if (paddr >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < paddr + size) ?
				__pa(high_memory) - paddr :
				size;

	if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
		free_memtype(paddr, paddr + size);
		printk(KERN_ERR
			"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			(unsigned long long)paddr,
			(unsigned long long)(paddr + size));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 * Otherwise, we reserve the entire vma range by going through the PTEs page
 * by page to get the physical address and protection.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	int retval = 0;
	unsigned long i, j;
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;
	pgprot_t pgprot;

	if (!pat_enabled)
		return 0;

	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	/* reserve entire vma page by page, using pfn and prot from pte */
	for (i = 0; i < vma_size; i += PAGE_SIZE) {
		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
			continue;

		pgprot = __pgprot(prot);
		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
		if (retval)
			goto cleanup_ret;
	}
	return 0;

cleanup_ret:
	/* Reserve error: Cleanup partial reservation and return error */
	for (j = 0; j < i; j += PAGE_SIZE) {
		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
			continue;

		free_pfn_range(paddr, PAGE_SIZE);
	}

	return retval;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for the physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, we reserve the entire vma range
 * with a single reserve_pfn_range() call.
 * Otherwise, we look at the pfn and size and reserve only the specified range
 * page by page.
 *
 * Note that this function can be called with the caller trying to map only a
 * subrange/page inside the vma.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			unsigned long pfn, unsigned long size)
{
	int retval = 0;
	unsigned long i, j;
	resource_size_t base_paddr;
	resource_size_t paddr;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;

	if (!pat_enabled)
		return 0;

	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	/* reserve page by page using pfn and size */
	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
	for (i = 0; i < size; i += PAGE_SIZE) {
		paddr = base_paddr + i;
		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
		if (retval)
			goto cleanup_ret;
	}
	return 0;

cleanup_ret:
	/* Reserve error: Cleanup partial reservation and return error */
	for (j = 0; j < i; j += PAGE_SIZE) {
		paddr = base_paddr + j;
		free_pfn_range(paddr, PAGE_SIZE);
	}

	return retval;
}

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
	unsigned long i;
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;

	if (!pat_enabled)
		return;

	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		free_pfn_range(paddr, vma_size);
		return;
	}

	if (size != 0 && size != vma_size) {
		/* free page by page, using pfn and size */
		paddr = (resource_size_t)pfn << PAGE_SHIFT;
		for (i = 0; i < size; i += PAGE_SIZE) {
			paddr = paddr + i;
			free_pfn_range(paddr, PAGE_SIZE);
		}
	} else {
		/* free entire vma, page by page, using the pfn from pte */
		for (i = 0; i < vma_size; i += PAGE_SIZE) {
			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
				continue;

			free_pfn_range(paddr, PAGE_SIZE);
		}
	}
}
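
/*
 * pgprot_writecombine() hands out a write-combining protection value when PAT
 * is enabled; without PAT it falls back to pgprot_noncached(), i.e. an
 * uncached mapping, as the conservative alternative.
 */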

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *list_node, *print_entry;
	int i = 1;

	print_entry  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	list_for_each_entry(list_node, &memtype_list, nd) {
		if (pos == i) {
			*print_entry = *list_node;
			spin_unlock(&memtype_lock);
			return print_entry;
		}
		++i;
	}
	spin_unlock(&memtype_lock);
	kfree(print_entry);

	return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
				NULL, &memtype_fops);
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */