/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"
#include "mm_internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "" fmt

static bool boot_cpu_done;

static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);

static inline void pat_disable(const char *reason)
{
	__pat_enabled = 0;
	pr_info("x86/PAT: %s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);

bool pat_enabled(void)
{
	return !!__pat_enabled;
}
EXPORT_SYMBOL_GPL(pat_enabled);

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

static u64 __read_mostly boot_pat_state;

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags WC and Uncached together to keep track of
 * the memory type of pages that have a backing struct page. X86 PAT
 * supports three different memory types:
 * _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and _PAGE_CACHE_MODE_UC_MINUS,
 * plus a fourth state where the page's memory type has not been changed
 * from its default (a value of -1 is used to denote this).
 * Note that we do not support _PAGE_CACHE_MODE_UC here.
 */

#define _PGMT_DEFAULT		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WB		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_DEFAULT)
		return -1;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_MODE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_MODE_UC_MINUS;
	else
		return _PAGE_CACHE_MODE_WB;
}

static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
	unsigned long memtype_flags;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_MODE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_MODE_WB:
		memtype_flags = _PGMT_WB;
		break;
	default:
		memtype_flags = _PGMT_DEFAULT;
		break;
	}

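	/* Atomically update only the two memtype flag bits in page->flags. */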
	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	return -1;
}
static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
}
#endif

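/* Memory type encodings as programmed into the PAT MSR entries (see the PAT(x, y) macro below). */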
enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

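/* pat_get_cache_mode() decodes one PAT MSR field into a page_cache_mode and a printable mnemonic. */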
#define CM(c) (_PAGE_CACHE_MODE_ ## c)

static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
{
	enum page_cache_mode cache;
	char *cache_mode;

	switch (pat_val) {
	case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
	case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
	case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
	case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
	case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
	case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
	default:           cache = CM(WB);       cache_mode = "WB  "; break;
	}

	memcpy(msg, cache_mode, 4);

	return cache;
}

#undef CM

/*
 * Update the cache mode to pgprot translation tables according to PAT
 * configuration.
 * Using lower indices is preferred, so we start with highest index.
 */
void pat_init_cache_modes(void)
{
	int i;
	enum page_cache_mode cache;
	char pat_msg[33];
	u64 pat;

	rdmsrl(MSR_IA32_CR_PAT, pat);
	pat_msg[32] = 0;
	for (i = 7; i >= 0; i--) {
		cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
					   pat_msg + 4 * i);
		update_cache_mode_entry(i, cache);
	}
	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
}

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

static void pat_bsp_init(u64 pat)
{
	if (!cpu_has_pat) {
		pat_disable("PAT not supported by CPU.");
		return;
	}

	rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
	if (!boot_pat_state) {
		pat_disable("PAT MSR is 0, disabled.");
		return;
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);

	pat_init_cache_modes();
}

static void pat_ap_init(u64 pat)
{
	if (!cpu_has_pat) {
		/*
		 * If this happens we are on a secondary CPU, but switched to
		 * PAT on the boot CPU. We have no way to undo PAT.
		 */
		panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
}

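/*
 * pat_init() programs the PAT MSR. The first call initializes the boot CPU
 * (and the cache-mode translation tables); subsequent calls program the
 * secondary CPUs with the same value.
 */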
void pat_init(void)
{
	u64 pat;

	if (!pat_enabled())
		return;

	/*
	 * Set PWT to Write-Combining. All other bits stay the same:
	 *
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	if (!boot_cpu_done) {
		pat_bsp_init(pat);
		boot_cpu_done = true;
	} else {
		pat_ap_init(pat);
	}
}

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Does the intersection of the PAT memory type and the MTRR memory type
 * and returns the resulting memory type as PAT understands it.
 * (PAT and MTRR types do not use the same numeric values.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     enum page_cache_mode req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_MODE_WB) {
		u8 mtrr_type, uniform;

		mtrr_type = mtrr_type_lookup(start, end, &uniform);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_MODE_UC_MINUS;

		return _PAGE_CACHE_MODE_WB;
	}

	return req_type;
}

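/*
 * State for walk_system_ram_range(): the callback notes whether the walked
 * range contains any RAM and whether it contains any non-RAM gap, and stops
 * the walk early once both have been seen.
 */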
struct pagerange_state {
	unsigned long		cur_pfn;
	int			ram;
	int			not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram	|= initial_pfn > state->cur_pfn;
	state->ram	|= total_nr_pages > 0;
	state->cur_pfn	 = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}

static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, physical address ranges in the legacy ISA
	 * region are tracked as non-RAM. This allows users of /dev/mem
	 * to map portions of the legacy ISA region, even when some of
	 * those portions are listed (or not listed at all) with
	 * different e820 types (RAM/reserved/..).
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
				  enum page_cache_mode req_type,
				  enum page_cache_mode *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_MODE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_MODE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		enum page_cache_mode type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

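/* Reset the page-flag based memtype of each page in the range to its default. */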
static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 *
 * If new_type is NULL, the function will return an error if it cannot
 * reserve the region with req_type. If new_type is non-NULL, the function
 * will return the available type in new_type in case of no error. In case
 * of any error it will return a negative value.
 */
int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
		    enum page_cache_mode *new_type)
{
	struct memtype *new;
	enum page_cache_mode actual_type;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled()) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == _PAGE_CACHE_MODE_WC)
				*new_type = _PAGE_CACHE_MODE_UC_MINUS;
			else
				*new_type = req_type;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = reserve_ram_pages_type(start, end, req_type, new_type);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
			start, end - 1,
			cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled())
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = free_ram_pages_type(start, end);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (!entry) {
		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}

/**
 * lookup_memtype - Looksup the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
530 531
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_UC
532
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;
		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		/*
		 * -1 from get_page_memtype() implies RAM page is in its
		 * default state and not reserved, and hence of type WB
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_MODE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			enum page_cache_mode *type)
{
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

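/*
 * For /dev/mem mappings the vma protection is returned unchanged here;
 * access checks and cache attributes are applied in
 * phys_mem_access_prot_allowed() below.
 */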
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled())
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
				current->comm, from, to - 1);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		pcm = _PAGE_CACHE_MODE_UC_MINUS;

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled() &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		pcm = _PAGE_CACHE_MODE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
	return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
{
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * some areas in the middle of the kernel identity range
	 * are not mapped, like the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(pcm),
			base, (unsigned long long)(base + size-1));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only and, after a successful reserve_memtype,
 * this func also keeps the identity mapping (if any) in sync with this
 * new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of the number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of the first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled())
			return 0;

		pcm = lookup_memtype(paddr);
		if (want_pcm != pcm) {
			pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					     (~_PAGE_CACHE_MASK)) |
					     cachemode2protval(pcm));
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			free_memtype(paddr, paddr + size);
			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
			       current->comm, current->pid,
			       cattr_name(want_pcm),
			       (unsigned long long)paddr,
			       (unsigned long long)(paddr + size - 1),
			       cattr_name(pcm));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     cachemode2protval(pcm));
	}

	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range
 * with a single reserve_pfn_range call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (!ret)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	if (!pat_enabled())
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

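/*
 * track_pfn_insert is called when a single pfn is being inserted into a vma;
 * prot is set from the memtype already tracked for that pfn.
 */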
int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
		     unsigned long pfn)
{
	enum page_cache_mode pcm;

	if (!pat_enabled())
		return 0;

	/* Set prot based on lookup */
	pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn and size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (!(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	vma->vm_flags &= ~VM_PAT;
}

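/*
 * With PAT enabled, write-combining is requested via the PAT bits; without
 * PAT we fall back to an uncached mapping.
 */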
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled())
		return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

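/*
 * Debugfs interface: when PAT is enabled, the tracked memtypes are exposed
 * through the "pat_memtype_list" file created in pat_memtype_list_init().
 */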
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_puts(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled()) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */