/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"
#include "mm_internal.h"

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
	(void)reason;
}
#endif


int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

static u64 __read_mostly boot_pat_state;

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses page flags WC and Uncached together to keep track of
 * memory type of pages that have backing page struct. X86 PAT supports 3
 * different memory types, _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and
 * _PAGE_CACHE_MODE_UC_MINUS, and a fourth state where the page's memory type
 * has not been changed from its default (a value of -1 denotes this).
 * Note we do not support _PAGE_CACHE_MODE_UC here.
 */

#define _PGMT_DEFAULT		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WB		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_DEFAULT)
		return -1;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_MODE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_MODE_UC_MINUS;
	else
		return _PAGE_CACHE_MODE_WB;
}

static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
	unsigned long memtype_flags;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_MODE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_MODE_WB:
		memtype_flags = _PGMT_WB;
		break;
	default:
		memtype_flags = _PGMT_DEFAULT;
		break;
	}

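	/*
	 * Use a cmpxchg() loop so that concurrent updates to the other bits
	 * in page->flags are not lost while the memtype bits are replaced.
	 */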
	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	return -1;
}
static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
}
#endif

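/*
 * Memory type values as they are encoded in each 8-bit entry of the
 * IA32_PAT MSR.
 */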
enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define CM(c) (_PAGE_CACHE_MODE_ ## c)

static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
{
	enum page_cache_mode cache;
	char *cache_mode;

	switch (pat_val) {
	case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
	case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
	case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
	case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
	case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
	case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
	default:           cache = CM(WB);       cache_mode = "WB  "; break;
	}

	memcpy(msg, cache_mode, 4);

	return cache;
}

#undef CM

/*
 * Update the cache mode to pgprot translation tables according to PAT
 * configuration.
 * Using lower indices is preferred, so we start with highest index.
 */
void pat_init_cache_modes(void)
{
	int i;
	enum page_cache_mode cache;
	char pat_msg[33];
	u64 pat;

	rdmsrl(MSR_IA32_CR_PAT, pat);
	pat_msg[32] = 0;
	for (i = 7; i >= 0; i--) {
		cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
					   pat_msg + 4 * i);
		update_cache_mode_entry(i, cache);
	}
	pr_info("PAT configuration [0-7]: %s\n", pat_msg);
}

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
	u64 pat;
	bool boot_cpu = !boot_pat_state;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			printk(KERN_ERR "PAT enabled, "
			       "but not supported by secondary CPU\n");
			BUG();
		}
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
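	/*
	 * Entries 4-7 repeat entries 0-3, which is why the PAT bit in the
	 * PTE is a don't-care here.
	 */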
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);

	if (boot_cpu)
		pat_init_cache_modes();
}

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Does the intersection of the PAT memory type and the MTRR memory type and
 * returns the resulting memory type as PAT understands it.
 * (The type encodings used by PAT and MTRR are not the same.)
 * The intersection is based on "Effective Memory Type" tables in IA-32
 * SDM vol 3a
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     enum page_cache_mode req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_MODE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_MODE_UC_MINUS;

		return _PAGE_CACHE_MODE_WB;
	}

	return req_type;
}

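/*
 * State for walk_system_ram_range(): "ram" is set once a RAM range has been
 * seen, "not_ram" once a gap (non-RAM) has been seen.  The walk is stopped
 * early as soon as both are set, i.e. the range is of mixed type.
 */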
struct pagerange_state {
	unsigned long		cur_pfn;
	int			ram;
	int			not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram	|= initial_pfn > state->cur_pfn;
	state->ram	|= total_nr_pages > 0;
	state->cur_pfn	 = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}

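/*
 * Returns 1 if the whole range is RAM, 0 if none of it is and -1 if it
 * contains a mix of RAM and non-RAM pages.
 */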
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, physical address range in the legacy ISA
	 * region is tracked as non-RAM. This will allow users of
	 * /dev/mem to map portions of legacy ISA region, even when
	 * some of those portions are listed (or not even listed) with
	 * different e820 types (RAM/reserved/...).
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

/*
 * For RAM pages, we use page flags to mark the pages with appropriate type.
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts
 * - In case of no conflicts, set the new memtype for pages in the range
 */
static int reserve_ram_pages_type(u64 start, u64 end,
				  enum page_cache_mode req_type,
				  enum page_cache_mode *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_MODE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_MODE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		enum page_cache_mode type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}

/*
 * req_type typically has one of the following:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
		    enum page_cache_mode *new_type)
{
	struct memtype *new;
	enum page_cache_mode actual_type;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == _PAGE_CACHE_MODE_WC)
				*new_type = _PAGE_CACHE_MODE_UC_MINUS;
			else
				*new_type = req_type;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = reserve_ram_pages_type(start, end, req_type, new_type);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
		       start, end - 1,
		       cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = free_ram_pages_type(start, end);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (!entry) {
		printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
		       current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}


/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_UC
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;
		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		/*
		 * -1 from get_page_memtype() implies RAM page is in its
		 * default state and not reserved, and hence of type WB
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_MODE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			enum page_cache_mode *type)
{
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
				current->comm, from, to - 1);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

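/*
 * Called for /dev/mem mappings: refuse the mapping if the range is not
 * allowed, otherwise adjust *vma_prot to a safe cache mode.
 */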
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		pcm = _PAGE_CACHE_MODE_UC_MINUS;

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		pcm = _PAGE_CACHE_MODE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
	return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
{
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * some areas in the middle of the kernel identity range
	 * are not mapped, like the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
			"for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(pcm),
			base, (unsigned long long)(base + size-1));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserved non RAM regions only and after successful reserve_memtype,
 * this func also keeps identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled)
			return 0;

		pcm = lookup_memtype(paddr);
		if (want_pcm != pcm) {
			printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					     (~_PAGE_CACHE_MASK)) |
					     cachemode2protval(pcm));
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested in
		 * the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     cachemode2protval(pcm));
	}

	if (kernel_map_sync_memtype(paddr, size, pcm) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_copy is called when a vma covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from pte and reserve the entire vma range with single reserve_pfn_range call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range reserve the entire vma range with
 * single reserve_pfn_range call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (!ret)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	if (!pat_enabled)
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
		     unsigned long pfn)
{
	enum page_cache_mode pcm;

	if (!pat_enabled)
		return 0;

	/* Set prot based on lookup */
	pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn, size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (!(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	vma->vm_flags &= ~VM_PAT;
}

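/*
 * With PAT enabled hand out a write-combining protection; without PAT fall
 * back to the uncached protection from pgprot_noncached().
 */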
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
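/*
 * Dump the memtype tree through a seq_file as
 * /sys/kernel/debug/x86/pat_memtype_list.  memtype_get_idx() copies the
 * n-th entry under memtype_lock and memtype_seq_show() frees that copy
 * after printing it.
 */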

static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_puts(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */