/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"
#include "mm_internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "" fmt

static bool __read_mostly boot_cpu_done;
static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
static bool __read_mostly pat_initialized;
static bool __read_mostly init_cm_done;

void pat_disable(const char *reason)
{
	if (pat_disabled)
		return;

	if (boot_cpu_done) {
		WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
		return;
	}

	pat_disabled = true;
	pr_info("x86/PAT: %s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);

bool pat_enabled(void)
{
	return pat_initialized;
}
EXPORT_SYMBOL_GPL(pat_enabled);

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);
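
/*
 * Boot-time usage of the two parameters above: "nopat" on the kernel
 * command line disables PAT entirely, while "debugpat" sets
 * pat_debug_enable, which gates the dprintk() tracing used throughout
 * this file (see pat_internal.h).
 */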

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses page flags arch_1 and uncached together to keep track of
 * memory type of pages that have backing page struct.
 *
 * X86 PAT supports 4 different memory types:
 *  - _PAGE_CACHE_MODE_WB
 *  - _PAGE_CACHE_MODE_WC
 *  - _PAGE_CACHE_MODE_UC_MINUS
 *  - _PAGE_CACHE_MODE_WT
 *
 * _PAGE_CACHE_MODE_WB is the default type.
 */

#define _PGMT_WB		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WT		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_WB)
		return _PAGE_CACHE_MODE_WB;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_MODE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_MODE_UC_MINUS;
	else
		return _PAGE_CACHE_MODE_WT;
}

static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
	unsigned long memtype_flags;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_MODE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_MODE_WT:
		memtype_flags = _PGMT_WT;
		break;
	case _PAGE_CACHE_MODE_WB:
	default:
		memtype_flags = _PGMT_WB;
		break;
	}

	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	return -1;
}
static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
}
#endif
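
/*
 * Summary of the two-flag encoding above (a reading aid derived from the
 * _PGMT_* definitions, not new semantics):
 *
 *	PG_uncached  PG_arch_1	tracked type
 *	0            0		_PAGE_CACHE_MODE_WB
 *	0            1		_PAGE_CACHE_MODE_WC
 *	1            0		_PAGE_CACHE_MODE_UC_MINUS
 *	1            1		_PAGE_CACHE_MODE_WT
 */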

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define CM(c) (_PAGE_CACHE_MODE_ ## c)

static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
{
	enum page_cache_mode cache;
	char *cache_mode;

	switch (pat_val) {
	case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
	case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
	case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
	case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
	case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
	case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
	default:           cache = CM(WB);       cache_mode = "WB  "; break;
	}

	memcpy(msg, cache_mode, 4);

	return cache;
}

#undef CM

/*
 * Update the cache mode to pgprot translation tables according to PAT
 * configuration.
 * Using lower indices is preferred, so we start with highest index.
 */
static void __init_cache_modes(u64 pat)
{
	enum page_cache_mode cache;
	char pat_msg[33];
	int i;

	pat_msg[32] = 0;
	for (i = 7; i >= 0; i--) {
		cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
					   pat_msg + 4 * i);
		update_cache_mode_entry(i, cache);
	}
	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);

	init_cm_done = true;
}

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

static void pat_bsp_init(u64 pat)
{
	u64 tmp_pat;

	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		pat_disable("PAT not supported by CPU.");
		return;
	}

	rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
	if (!tmp_pat) {
		pat_disable("PAT MSR is 0, disabled.");
		return;
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
	pat_initialized = true;

	__init_cache_modes(pat);
}

static void pat_ap_init(u64 pat)
{
	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * If this happens we are on a secondary CPU, but switched to
		 * PAT on the boot CPU. We have no way to undo PAT.
		 */
		panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
}

void init_cache_modes(void)
{
	u64 pat = 0;

	if (init_cm_done)
		return;

	if (boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * CPU supports PAT. Set PAT table to be consistent with
		 * PAT MSR. This case supports "nopat" boot option, and
		 * virtual machine environments which support PAT without
		 * MTRRs. In particular, Xen has a unique setup of the PAT MSR.
		 *
		 * If the PAT MSR returns 0, it is considered invalid and
		 * emulated as no PAT.
		 */
		rdmsrl(MSR_IA32_CR_PAT, pat);
	}

	if (!pat) {
		/*
		 * No PAT. Emulate the PAT table that corresponds to the two
		 * cache bits, PWT (Write Through) and PCD (Cache Disable).
		 * This setup is also the same as the BIOS default setup.
		 *
		 * PTE encoding:
		 *
		 *       PCD
		 *       |PWT  PAT
		 *       ||    slot
		 *       00    0    WB : _PAGE_CACHE_MODE_WB
		 *       01    1    WT : _PAGE_CACHE_MODE_WT
		 *       10    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *       11    3    UC : _PAGE_CACHE_MODE_UC
		 *
		 * NOTE: When WC or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
	}

	__init_cache_modes(pat);
}

/**
 * pat_init - Initialize PAT MSR and PAT table
 *
 * This function initializes PAT MSR and PAT table with an OS-defined value
 * to enable additional cache attributes, WC, WT and WP.
 *
 * This function must be called on all CPUs using the specific sequence of
 * operations defined in Intel SDM. mtrr_rendezvous_handler() provides this
 * procedure for PAT.
 */
void pat_init(void)
{
	u64 pat;
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (pat_disabled)
		return;

	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
	     ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
		/*
		 * PAT support with the lower four entries. Intel Pentium 2,
		 * 3, M, and 4 are affected by PAT errata, which makes the
		 * upper four entries unusable. To be on the safe side, we don't
		 * use those.
		 *
		 *  PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      PAT bit unused
		 *
		 * NOTE: When WT or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
	} else {
		/*
		 * Full PAT support.  We put WT in slot 7 to improve
		 * robustness in the presence of errata that might cause
		 * the high PAT bit to be ignored.  This way, a buggy slot 7
		 * access will hit slot 3, and slot 3 is UC, so at worst
		 * we lose performance without causing a correctness issue.
		 * Pentium 4 erratum N46 is an example for such an erratum,
		 * although we try not to use PAT at all on affected CPUs.
		 *
		 *  PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      100    4    WB : Reserved
		 *      101    5    WP : _PAGE_CACHE_MODE_WP
		 *      110    6    UC-: Reserved
		 *      111    7    WT : _PAGE_CACHE_MODE_WT
		 *
		 * The reserved slots are unused, but mapped to their
		 * corresponding types in the presence of PAT errata.
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WP) | PAT(6, UC_MINUS) | PAT(7, WT);
	}

	if (!boot_cpu_done) {
		pat_bsp_init(pat);
		boot_cpu_done = true;
	} else {
		pat_ap_init(pat);
	}
}

#undef PAT
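
/*
 * Worked example (computed from the PAT() macro above, as a reading aid):
 * the full-PAT table chosen in pat_init() packs one type value per byte,
 * so WB, WC, UC-, UC, WB, WP, UC-, WT in slots 0-7 yields
 * MSR_IA32_CR_PAT = 0x0407050600070106.
 */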

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Does the intersection of the PAT memory type and the MTRR memory type
 * and returns the resulting memory type as PAT understands it.
 * (The types in PAT and MTRR do not use the same numeric values.)
 * The intersection is based on the "Effective Memory Type" tables in
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     enum page_cache_mode req_type)
{
	/*
	 * Look for an MTRR hint to get the effective type in case where the
	 * PAT request is for WB.
	 */
	if (req_type == _PAGE_CACHE_MODE_WB) {
		u8 mtrr_type, uniform;

		mtrr_type = mtrr_type_lookup(start, end, &uniform);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_MODE_UC_MINUS;

		return _PAGE_CACHE_MODE_WB;
	}

	return req_type;
}

struct pagerange_state {
	unsigned long		cur_pfn;
	int			ram;
	int			not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram	|= initial_pfn > state->cur_pfn;
	state->ram	|= total_nr_pages > 0;
	state->cur_pfn	 = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}

static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, physical address range in the legacy ISA
	 * region is tracked as non-RAM. This will allow users of
	 * /dev/mem to map portions of the legacy ISA region, even when
	 * some of those portions are listed (or not even listed) with
	 * different e820 types (RAM/reserved/..)
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

/*
 * For RAM pages, we use page flags to mark the pages with appropriate type.
 * The page flags are limited to four types, WB (default), WC, WT and UC-.
 * WP request fails with -EINVAL, and UC gets redirected to UC-.  Setting
 * a new memory type is only allowed for a page mapped with the default WB
 * type.
 *
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
				  enum page_cache_mode req_type,
				  enum page_cache_mode *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_MODE_WP) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_UC_MINUS;
		return -EINVAL;
	}

	if (req_type == _PAGE_CACHE_MODE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_MODE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		enum page_cache_mode type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != _PAGE_CACHE_MODE_WB) {
			pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, _PAGE_CACHE_MODE_WB);
	}
	return 0;
}

/*
 * req_type typically has one of:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 * - _PAGE_CACHE_MODE_WT
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
		    enum page_cache_mode *new_type)
{
	struct memtype *new;
	enum page_cache_mode actual_type;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled()) {
		/* This is identical to page table setting without PAT */
		if (new_type)
			*new_type = req_type;
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = reserve_ram_pages_type(start, end, req_type, new_type);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
			start, end - 1,
			cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled())
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = free_ram_pages_type(start, end);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (IS_ERR(entry)) {
		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}
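
/*
 * Usage sketch for the pair above (illustrative only, not part of this
 * file; the physical range is hypothetical):
 *
 *	enum page_cache_mode new_pcm;
 *	u64 start = 0xfd000000, end = 0xfd001000;
 *
 *	if (!reserve_memtype(start, end, _PAGE_CACHE_MODE_WC, &new_pcm)) {
 *		... map the range using new_pcm, which may be a different
 *		    but compatible type than the WC that was requested ...
 *		free_memtype(start, end);
 *	}
 */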

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_WT.
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;

		page = pfn_to_page(paddr >> PAGE_SHIFT);
		return get_page_memtype(page);
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			enum page_cache_mode *type)
{
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
	enum page_cache_mode type = _PAGE_CACHE_MODE_WC;

	return io_reserve_memtype(start, start + size, &type);
}
EXPORT_SYMBOL(arch_io_reserve_memtype_wc);

void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
	io_free_memtype(start, start + size);
}
EXPORT_SYMBOL(arch_io_free_memtype_wc);
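
/*
 * Usage sketch (illustrative only, not part of this file): a graphics
 * driver might reserve WC for a framebuffer aperture before ioremapping
 * it. The names pdev, fb_virt and BAR 0 are hypothetical.
 *
 *	resource_size_t fb_base = pci_resource_start(pdev, 0);
 *	resource_size_t fb_size = pci_resource_len(pdev, 0);
 *
 *	if (!arch_io_reserve_memtype_wc(fb_base, fb_size)) {
 *		fb_virt = ioremap_wc(fb_base, fb_size);
 *		...
 *		arch_io_free_memtype_wc(fb_base, fb_size);
 *	}
 */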

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled())
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		pcm = _PAGE_CACHE_MODE_UC_MINUS;

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
	return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
{
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * Some areas in the middle of the kernel identity range
	 * are not mapped, like the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(pcm),
			base, (unsigned long long)(base + size-1));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only. After a successful reserve_memtype(),
 * this func also keeps the identity mapping (if any) in sync with the
 * new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled())
			return 0;

		pcm = lookup_memtype(paddr);
		if (want_pcm != pcm) {
			pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					     (~_PAGE_CACHE_MASK)) |
					     cachemode2protval(pcm));
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			free_memtype(paddr, paddr + size);
			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
			       current->comm, current->pid,
			       cattr_name(want_pcm),
			       (unsigned long long)paddr,
			       (unsigned long long)(paddr + size - 1),
			       cattr_name(pcm));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     cachemode2protval(pcm));
	}

	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has
 * a linear pfn mapping for the entire range, or no vma is provided,
 * reserve the entire pfn + size range with a single reserve_pfn_range
 * call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (!vma || (addr == vma->vm_start
				&& size == (vma->vm_end - vma->vm_start))) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (ret == 0 && vma)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	if (!pat_enabled())
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
{
	enum page_cache_mode pcm;

	if (!pat_enabled())
		return;

	/* Set prot based on lookup */
	pcm = lookup_memtype(pfn_t_to_phys(pfn));
	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn, size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (vma && !(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	if (vma)
		vma->vm_flags &= ~VM_PAT;
}

/*
 * untrack_pfn_moved is called, while mremapping a pfnmap for a new region,
 * with the old vma after its pfnmap page table has been removed.  The new
 * vma has a new pfnmap to the same pfn & cache type with VM_PAT set.
 */
void untrack_pfn_moved(struct vm_area_struct *vma)
{
	vma->vm_flags &= ~VM_PAT;
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

pgprot_t pgprot_writethrough(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);
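
/*
 * Usage sketch (illustrative only, not part of this file): a driver's
 * mmap handler might apply WC before remapping; remap_pfn_range() then
 * goes through track_pfn_remap() above. mydrv_mmap and mydrv_pfn are
 * hypothetical names.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, mydrv_pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */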

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_puts(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled()) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */