/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"
#include "mm_internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "" fmt

static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);

static inline void pat_disable(const char *reason)
{
	__pat_enabled = 0;
	pr_info("x86/PAT: %s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);

bool pat_enabled(void)
{
	return !!__pat_enabled;
}

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

static u64 __read_mostly boot_pat_state;

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses page flags WC and Uncached together to keep track of
 * memory type of pages that have a backing page struct. X86 PAT supports 3
 * different memory types, _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and
 * _PAGE_CACHE_MODE_UC_MINUS, and a fourth state where the page's memory
 * type has not been changed from its default (a value of -1 is used to
 * denote this).
 * Note we do not support _PAGE_CACHE_MODE_UC here.
 */

#define _PGMT_DEFAULT		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WB		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)
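/*
 * Resulting (PG_uncached, PG_arch_1) bit pairs:
 * 00 => default (-1), 01 => WC, 10 => UC-, 11 => WB.
 */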

static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_DEFAULT)
		return -1;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_MODE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_MODE_UC_MINUS;
	else
		return _PAGE_CACHE_MODE_WB;
}

static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
	unsigned long memtype_flags;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_MODE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_MODE_WB:
		memtype_flags = _PGMT_WB;
		break;
	default:
		memtype_flags = _PGMT_DEFAULT;
		break;
	}

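	/*
	 * Lock-free read-modify-write of page->flags: retry until the
	 * cmpxchg() observes no concurrent flags update.
	 */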
	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	return -1;
}
static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
}
#endif

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define CM(c) (_PAGE_CACHE_MODE_ ## c)

static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
{
	enum page_cache_mode cache;
	char *cache_mode;

	switch (pat_val) {
	case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
	case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
	case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
	case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
	case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
	case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
	default:           cache = CM(WB);       cache_mode = "WB  "; break;
	}

	memcpy(msg, cache_mode, 4);

	return cache;
}

#undef CM

/*
 * Update the cache mode to pgprot translation tables according to PAT
 * configuration.
 * Using lower indices is preferred, so we start with highest index.
 */
void pat_init_cache_modes(void)
{
	int i;
	enum page_cache_mode cache;
	char pat_msg[33];
	u64 pat;

	rdmsrl(MSR_IA32_CR_PAT, pat);
	pat_msg[32] = 0;
	for (i = 7; i >= 0; i--) {
		cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
					   pat_msg + 4 * i);
		update_cache_mode_entry(i, cache);
	}
	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
}

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
	u64 pat;
	bool boot_cpu = !boot_pat_state;

	if (!pat_enabled())
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			pr_err("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
			BUG();
		}
	}

	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * The hardware PAT bit is left unused, so PAT entries 4-7 simply
	 * mirror entries 0-3.
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state) {
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
		if (!boot_pat_state) {
			pat_disable("PAT read returns always zero, disabled.");
			return;
		}
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);

	if (boot_cpu)
		pat_init_cache_modes();
}

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Intersect the PAT memory type with the MTRR memory type and return the
 * result as PAT understands it. (The PAT and MTRR types do not share the
 * same numeric values.) The intersection is based on the "Effective Memory
 * Type" tables in the IA-32 SDM, vol. 3a: e.g. a WB request over a range
 * that an MTRR marks UC is downgraded to UC-.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     enum page_cache_mode req_type)
{
	/*
	 * Look for an MTRR hint to get the effective type in the case
	 * where the PAT request is for WB.
	 */
	if (req_type == _PAGE_CACHE_MODE_WB) {
		u8 mtrr_type, uniform;

		mtrr_type = mtrr_type_lookup(start, end, &uniform);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_MODE_UC_MINUS;

		return _PAGE_CACHE_MODE_WB;
	}

	return req_type;
}

struct pagerange_state {
	unsigned long		cur_pfn;
	int			ram;
	int			not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram	|= initial_pfn > state->cur_pfn;
	state->ram	|= total_nr_pages > 0;
	state->cur_pfn	 = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}

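/*
 * Returns 1 if the range is RAM, 0 if it contains no RAM, and -1 if it
 * mixes RAM and non-RAM pages, which PAT cannot track as one range.
 */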
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, physical address range in the legacy ISA
	 * region is tracked as non-RAM. This will allow users of
	 * /dev/mem to map portions of legacy ISA region, even when
	 * some of those portions are listed (or not even listed) with
	 * different e820 types (RAM/reserved/...).
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

/*
 * For RAM pages, we use page flags to mark the pages with the appropriate
 * type. Here we do two passes:
 * - Find the memtype of all the pages in the range, looking for any conflicts.
 * - In case of no conflicts, set the new memtype for the pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
				  enum page_cache_mode req_type,
				  enum page_cache_mode *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_MODE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_MODE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		enum page_cache_mode type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 *
 * If new_type is NULL, the function returns an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function returns
 * the type actually reserved in *new_type on success. On any error, it
 * returns a negative value.
 */
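/*
 * Example (an illustrative sketch, not a verbatim caller; paddr and size
 * are placeholders and error handling is trimmed):
 *
 *	enum page_cache_mode new_pcm;
 *
 *	if (!reserve_memtype(paddr, paddr + size,
 *			     _PAGE_CACHE_MODE_WC, &new_pcm)) {
 *		... use the range with new_pcm, then release it ...
 *		free_memtype(paddr, paddr + size);
 *	}
 */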
int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
		    enum page_cache_mode *new_type)
{
	struct memtype *new;
	enum page_cache_mode actual_type;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled()) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == _PAGE_CACHE_MODE_WC)
				*new_type = _PAGE_CACHE_MODE_UC_MINUS;
			else
				*new_type = req_type;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = reserve_ram_pages_type(start, end, req_type, new_type);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
			start, end - 1,
			cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled())
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = free_ram_pages_type(start, end);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (!entry) {
		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}

/**
 * lookup_memtype - Look up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_UC
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;
		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		/*
		 * -1 from get_page_memtype() implies RAM page is in its
		 * default state and not reserved, and hence of type WB
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_MODE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
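/*
 * A sketched (hypothetical) ioremap-style caller:
 *
 *	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;
 *
 *	if (io_reserve_memtype(start, start + size, &pcm))
 *		return NULL;
 *	... establish the mapping with pcm, and call io_free_memtype()
 *	    on teardown ...
 */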
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			enum page_cache_mode *type)
{
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled())
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
				current->comm, from, to - 1);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		pcm = _PAGE_CACHE_MODE_UC_MINUS;

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled() &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		pcm = _PAGE_CACHE_MODE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
	return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
{
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * Some areas in the middle of the kernel identity range
	 * are not mapped, like the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(pcm),
			base, (unsigned long long)(base + size-1));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype(),
 * it also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages: we do not refcount the number
	 * of mappings of RAM pages, we only assert that the requested type
	 * matches the type of the first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled())
			return 0;

		pcm = lookup_memtype(paddr);
		if (want_pcm != pcm) {
			pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					     (~_PAGE_CACHE_MASK)) |
					     cachemode2protval(pcm));
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			free_memtype(paddr, paddr + size);
			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
			       current->comm, current->pid,
			       cattr_name(want_pcm),
			       (unsigned long long)paddr,
			       (unsigned long long)(paddr + size - 1),
			       cattr_name(pcm));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     cachemode2protval(pcm));
	}

	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_copy is called when a vma that covers a pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range
 * with a single reserve_pfn_range() call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (!ret)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	if (!pat_enabled())
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
		     unsigned long pfn)
{
	enum page_cache_mode pcm;

	if (!pat_enabled())
		return 0;

	/* Set prot based on lookup */
	pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * It can be called either for a specific region indicated by pfn and size,
 * or for the entire vma (in which case pfn and size are both zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (!(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	vma->vm_flags &= ~VM_PAT;
}

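/*
 * With PAT disabled there is no WC page attribute, so fall back to an
 * uncached mapping rather than silently handing back a cached one.
 */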
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled())
		return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
920

921
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_puts(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);
	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

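/*
 * Expose the tracked memtypes at /sys/kernel/debug/x86/pat_memtype_list
 * (via arch_debugfs_dir) when PAT is enabled.
 */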
static int __init pat_memtype_list_init(void)
{
	if (pat_enabled()) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */