/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
	(void)reason;
}
#endif


int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))
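
/*
 * Illustrative expansion (comment only, not compiled): PAT(x, y) places
 * the 3-bit memory type y into byte x of the 64-bit IA32_PAT MSR image.
 * For example, PAT(1, WC) == (u64)1 << 8 == 0x0000000000000100ULL and
 * PAT(2, UC_MINUS) == (u64)7 << 16 == 0x0000000000070000ULL.
 */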

void pat_init(void)
{
	u64 pat;
	bool boot_cpu = !boot_pat_state;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			printk(KERN_ERR "PAT enabled, "
			       "but not supported by secondary CPU\n");
			BUG();
		}
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
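
	/*
	 * With the encoding above, the full MSR image works out to
	 * 0x0007010600070106: entries 0-3 (WB, WC, UC-, UC) are mirrored
	 * into entries 4-7, which is why the PAT bit in the PTE can stay
	 * unused.
	 */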

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);

	if (boot_cpu)
		printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
		       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (The type values used by PAT and MTRR are not the same.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM, vol. 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for an MTRR hint to get the effective type in the case
	 * where the PAT request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_UC_MINUS;

		return _PAGE_CACHE_WB;
	}

	return req_type;
}
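
/*
 * Example of the intersection (illustrative): a WB request over a range
 * that an MTRR marks as UC or WC is degraded to UC- here, so we never
 * hand out a cache-enabled mapping that the MTRRs would override anyway.
 * WC, UC- and UC requests are passed through unchanged.
 */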

static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		/*
		 * For legacy reasons, physical address ranges in the legacy
		 * ISA region are tracked as non-RAM. This allows users of
		 * /dev/mem to map portions of the legacy ISA region, even
		 * when some of those portions are listed (or not even
		 * listed) with different e820 types (RAM/reserved/...).
		 */
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}
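
/*
 * Return-value contract (illustrative): 1 if the range is entirely RAM
 * (above the ISA region), 0 if it contains no RAM pages, and -1 if it
 * mixes RAM and non-RAM pages, in which case callers reject the request.
 */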

/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts
 * - In case of no conflicts, set the new memtype for pages in the range
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		unsigned long type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			printk(KERN_INFO "reserve_ram_pages_type failed "
				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
				start, end, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * If new_type is NULL, the function returns an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function stores
 * the available type in *new_type on success. On any error it returns a
 * negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new;
	unsigned long actual_type;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == _PAGE_CACHE_WC)
				*new_type = _PAGE_CACHE_UC_MINUS;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = reserve_ram_pages_type(start, end, req_type, new_type);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
		       "track %s, req %s\n",
		       start, end, cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
		start, end, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = free_ram_pages_type(start, end);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (!entry) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
		return -EINVAL;
	}
	kfree(entry);

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
	return 0;
}
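
/*
 * Minimal usage sketch (hypothetical caller, illustrative only): reserve
 * a region as write-combining, accept whatever compatible type PAT hands
 * back, and balance the reservation with free_memtype() on teardown:
 *
 *	unsigned long new_type;
 *
 *	if (reserve_memtype(phys, phys + len, _PAGE_CACHE_WC, &new_type) < 0)
 *		return -EBUSY;
 *	...
 *	free_memtype(phys, phys + len);
 *
 * phys and len are placeholders for the caller's region.
 */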

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
	int rettype = _PAGE_CACHE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;
		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		/*
		 * -1 from get_page_memtype() implies RAM page is in its
		 * default state and not reserved, and hence of type WB
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			unsigned long *type)
{
	resource_size_t size = end - start;
	unsigned long req_type = *type;
	unsigned long new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}
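
/*
 * Hypothetical usage sketch (illustrative only): the io_* pair wraps the
 * raw memtype reservation with a sanity check and an identity-map sync,
 * which is what ioremap-style callers want:
 *
 *	unsigned long type = _PAGE_CACHE_UC_MINUS;
 *
 *	if (io_reserve_memtype(start, start + size, &type))
 *		return -EBUSY;
 *	...
 *	io_free_memtype(start, start + size);
 */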

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in the case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	unsigned long flags = _PAGE_CACHE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		flags = _PAGE_CACHE_UC_MINUS;

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}
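
/*
 * Example (illustrative): a process that opens /dev/mem with O_DSYNC gets
 * an uncached-minus mapping from the code above instead of the default
 * write-back, which is the traditional way to request uncached access.
 */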
/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
	unsigned long id_sz;

	if (base >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
		printk(KERN_INFO
			"%s:%d ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			base, (unsigned long long)(base + size));
		return -EINVAL;
	}
	return 0;
}
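
/*
 * Illustrative behaviour: addresses at or above high_memory have no
 * kernel identity mapping, so kernel_map_sync_memtype() trivially
 * succeeds for them; otherwise only the part of [base, base + size)
 * that the identity map actually covers is converted.
 */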

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only. After a successful reserve_memtype(), this
 * function also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of the first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled)
			return 0;

		flags = lookup_memtype(paddr);
		if (want_flags != flags) {
			printk(KERN_WARNING
			"%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					      (~_PAGE_CACHE_MASK)) |
					     flags);
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from pte and reserve the entire vma range with single reserve_pfn_range call.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range reserve the entire vma range with
 * single reserve_pfn_range call.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			unsigned long pfn, unsigned long size)
{
	unsigned long flags;
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	if (!pat_enabled)
		return 0;

	/* for vm_insert_pfn and friends, we set prot based on lookup */
	flags = lookup_memtype(pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 flags);

	return 0;
}

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
729
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
730 731 732 733 734
		free_pfn_range(paddr, vma_size);
		return;
	}
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
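
/*
 * Hypothetical driver usage (illustrative only): map a framebuffer-style
 * region write-combined where PAT is available, transparently falling
 * back to an uncached mapping otherwise:
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			       vma->vm_page_prot);
 */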

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);
	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);
	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);
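
/*
 * The resulting file can be inspected at runtime, e.g. (sample output,
 * assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	# cat /sys/kernel/debug/x86/pat_memtype_list
 *	PAT memtype list:
 *	uncached-minus @ 0xfed00000-0xfed01000
 *	write-combining @ 0xd0000000-0xd0800000
 */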

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */