/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

void __cpuinit pat_disable(char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#endif


static int debug_enable;

static int __init pat_debug_setup(char *str)
{
	debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)


static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
	u64 pat;

	if (!pat_enabled)
		return;

	/* Paranoia check. */
	if (!cpu_has_pat && boot_pat_state) {
		/*
		 * If this happens we are on a secondary CPU, but
		 * switched to PAT on the boot CPU. We have no way to
		 * undo PAT.
		 */
		printk(KERN_ERR "PAT enabled, "
		       "but not supported by secondary CPU\n");
		BUG();
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
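
	/*
	 * Illustration: PAT(x, y) places one byte per PAT entry (entry 0
	 * in the low byte), so the value written to the MSR below works
	 * out to 0x0007010600070106: UC(00), UC-(07), WC(01), WB(06) from
	 * the top byte down, repeated for entries 4-7.
	 */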

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}

/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem,
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
	u64			start;
	u64			end;
	unsigned long		type;
	struct list_head	nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
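
/*
 * Illustrative list state (hypothetical addresses, not from a real
 * system): after two aliasing WC reservations and one UC- reservation,
 * sorted by start address the list would read:
 *
 *	0xd0000000-0xd0100000 write-combining
 *	0xd0000000-0xd0100000 write-combining	(alias of the same range)
 *	0xfed00000-0xfed01000 uncached-minus
 *
 * Duplicate entries with identical attributes are how overlapping
 * reservations are reference counted, as described above.
 */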

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (PAT and MTRR types do not use the same numeric values.)
 * The intersection is based on the "Effective Memory Type" tables in
 * the IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type == MTRR_TYPE_UNCACHABLE)
			return _PAGE_CACHE_UC;
		if (mtrr_type == MTRR_TYPE_WRCOMB)
			return _PAGE_CACHE_WC;
	}

	return req_type;
}
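
/*
 * Example (hypothetical range): a WB request over a range that an MTRR
 * marks uncachable is degraded rather than honored blindly:
 *
 *	type = pat_x_mtrr_type(0xf0000000, 0xf0100000, _PAGE_CACHE_WB);
 *	// _PAGE_CACHE_UC if mtrr_type_lookup() reports
 *	// MTRR_TYPE_UNCACHABLE, _PAGE_CACHE_WC for MTRR_TYPE_WRCOMB,
 *	// otherwise the WB request is returned unchanged.
 */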

static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
	if (new->type != entry->type) {
		if (type) {
			new->type = entry->type;
			*type = entry->type;
		} else
			goto conflict;
	}

	/* check overlaps with more than one entry in the list */
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (new->end <= entry->start)
			break;
		else if (new->type != entry->type)
			goto conflict;
	}
	return 0;

 conflict:
	printk(KERN_INFO "%s:%d conflicting memory types "
	       "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
	       new->end, cattr_name(new->type), cattr_name(entry->type));
	return -EBUSY;
}

static struct memtype *cached_entry;
static u64 cached_start;

static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		/*
		 * For legacy reasons, the physical address range in the
		 * legacy ISA region is tracked as non-RAM. This allows
		 * users of /dev/mem to map portions of the legacy ISA
		 * region, even when some of those portions are listed
		 * (or not even listed) with different e820 types
		 * (RAM/reserved/..).
		 */
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}
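
/*
 * Return value sketch: 1 if every page above the ISA region in the range
 * is RAM, 0 if none is, -1 for a mixed range. E.g. with a (hypothetical)
 * e820 layout where usable RAM ends mid-range and a reserved hole
 * follows, the function returns -1 and the caller rejects the request.
 */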

/*
 * For RAM pages, mark the pages as non WB memory type using
 * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
 * set_memory_wc() on a RAM page at a time before marking it as WB again.
 * This is ok, because only one driver will be owning the page and
 * doing set_memory_*() calls.
 *
 * For now, we use PageNonWB to track that the RAM page is being mapped
 * as non-WB. In the future, we will have to use one more flag
 * (or some other mechanism in struct page) to distinguish between
 * UC and WC mappings.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn, end_pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		if (page_mapped(page) || PageNonWB(page))
			goto out;

		SetPageNonWB(page);
	}
	return 0;

out:
	end_pfn = pfn;
	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
		page = pfn_to_page(pfn);
		ClearPageNonWB(page);
	}

	return -EINVAL;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn, end_pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		if (page_mapped(page) || !PageNonWB(page))
			goto out;

		ClearPageNonWB(page);
	}
	return 0;

out:
	end_pfn = pfn;
	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
		page = pfn_to_page(pfn);
		SetPageNonWB(page);
	}
	return -EINVAL;
}
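
/*
 * Illustrative pairing (driver context, RAM-backed pages assumed):
 *
 *	set_memory_uc(vaddr, 1);	// reserve_ram_pages_type() sets PageNonWB
 *	...
 *	set_memory_wb(vaddr, 1);	// free_ram_pages_type() clears it
 *
 * A second set_memory_uc()/set_memory_wc() on the same page before the
 * set_memory_wb() fails, because PageNonWB is already set.
 */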

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants
 * to inherit the memory type from mtrr (if WB), existing PAT, defaulting
 * to UC_MINUS.
 *
 * If new_type is NULL, the function will return an error if it cannot
 * reserve the region with req_type. If new_type is non-NULL, the function
 * will return the available type in new_type in case of no error. In case
 * of any error it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new, *entry;
	unsigned long actual_type;
	struct list_head *where;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == -1)
				*new_type = _PAGE_CACHE_WB;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (is_ISA_range(start, end - 1)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	if (req_type == -1) {
		/*
		 * Call mtrr_lookup to get the type hint. This is an
		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
		 * tools and ACPI tools). Use WB request for WB memory and use
		 * UC_MINUS otherwise.
		 */
		u8 mtrr_type = mtrr_type_lookup(start, end);

		if (mtrr_type == MTRR_TYPE_WRBACK)
			actual_type = _PAGE_CACHE_WB;
		else
			actual_type = _PAGE_CACHE_UC_MINUS;
	} else {
		actual_type = pat_x_mtrr_type(start, end,
					      req_type & _PAGE_CACHE_MASK);
	}

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1)
		return reserve_ram_pages_type(start, end, req_type,
					      new_type);
	else if (is_range_ram < 0)
		return -EINVAL;

	new  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	if (cached_entry && start >= cached_start)
		entry = cached_entry;
	else
		entry = list_entry(&memtype_list, struct memtype, nd);

	/* Search for existing mapping that overlaps the current range */
	where = NULL;
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (end <= entry->start) {
			where = entry->nd.prev;
			cached_entry = list_entry(where, struct memtype, nd);
			break;
		} else if (start <= entry->start) { /* end > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				where = entry->nd.prev;
				cached_entry = list_entry(where,
							struct memtype, nd);
			}
			break;
		} else if (start < entry->end) { /* start > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				cached_entry = list_entry(entry->nd.prev,
							struct memtype, nd);

				/*
				 * Move to right position in the linked
				 * list to add this new entry
				 */
				list_for_each_entry_continue(entry,
							&memtype_list, nd) {
					if (start <= entry->start) {
						where = entry->nd.prev;
						break;
					}
				}
			}
			break;
		}
	}

	if (err) {
		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
		       "track %s, req %s\n",
		       start, end, cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	cached_start = start;

	if (where)
		list_add(&new->nd, where);
	else
		list_add_tail(&new->nd, &memtype_list);

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
		start, end, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

int free_memtype(u64 start, u64 end)
{
	struct memtype *entry;
	int err = -EINVAL;
	int is_range_ram;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (is_ISA_range(start, end - 1))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1)
		return free_ram_pages_type(start, end);
	else if (is_range_ram < 0)
		return -EINVAL;

	spin_lock(&memtype_lock);
	list_for_each_entry(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			if (cached_entry == entry || cached_start == start)
				cached_entry = NULL;

			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		}
	}
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
	}

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}
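
/*
 * Typical usage sketch (hypothetical addresses), mirroring what
 * ioremap-style callers do:
 *
 *	unsigned long new_type;
 *
 *	if (reserve_memtype(0xfed00000, 0xfed01000,
 *			    _PAGE_CACHE_UC_MINUS, &new_type))
 *		return -EBUSY;	// conflicting type already tracked
 *	...			// map using new_type, which may differ
 *	free_memtype(0xfed00000, 0xfed01000);
 */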


pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	u64 offset = ((u64) pfn) << PAGE_SHIFT;
	unsigned long flags = -1;
	int retval;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC_MINUS;
	}

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	/*
	 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
	 *
	 * Without O_SYNC, we want to get
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	if (flags != -1) {
		retval = reserve_memtype(offset, offset + size, flags, NULL);
	} else {
		retval = reserve_memtype(offset, offset + size, -1, &flags);
	}

	if (retval < 0)
		return 0;

	if (((pfn < max_low_pfn_mapped) ||
	     (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
		free_memtype(offset, offset + size);
		printk(KERN_INFO
		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			offset, (unsigned long long)(offset + size));
		return 0;
	}

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}

void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
	u64 addr = (u64)pfn << PAGE_SHIFT;
	unsigned long flags;

	reserve_memtype(addr, addr + size, want_flags, &flags);
	if (flags != want_flags) {
		printk(KERN_INFO
		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
			current->comm, current->pid,
			cattr_name(want_flags),
			addr, (unsigned long long)(addr + size),
			cattr_name(flags));
	}
}

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;

	free_memtype(addr, addr + size);
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only and, after a successful reserve_memtype,
 * also keeps the identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int id_sz, ret;
	unsigned long flags;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() doesn't support RAM pages.
	 */
	if (is_ram != 0)
		return -EINVAL;

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	/* Need to keep identity mapping in sync */
	if (paddr >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < paddr + size) ?
				__pa(high_memory) - paddr :
				size;

	if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
		free_memtype(paddr, paddr + size);
		printk(KERN_ERR
			"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			(unsigned long long)paddr,
			(unsigned long long)(paddr + size));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when a vma that covers a pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call.
 * Otherwise, we reserve the entire vma range by going through the PTEs page
 * by page to get the physical address and protection.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	int retval = 0;
	unsigned long i, j;
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;
	pgprot_t pgprot;

	if (!pat_enabled)
		return 0;

	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from the pte.
		 */
		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	/* reserve entire vma page by page, using pfn and prot from pte */
	for (i = 0; i < vma_size; i += PAGE_SIZE) {
		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
			continue;

		pgprot = __pgprot(prot);
		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
		if (retval)
			goto cleanup_ret;
	}
	return 0;

cleanup_ret:
	/* Reserve error: Cleanup partial reservation and return error */
	for (j = 0; j < i; j += PAGE_SIZE) {
		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
			continue;

		free_pfn_range(paddr, PAGE_SIZE);
	}

	return retval;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range reserve the entire vma range with
 * single reserve_pfn_range call.
 * Otherwise, we look at the pfn and size and reserve only the specified range
 * page by page.
 *
 * Note that this function can be called with caller trying to map only a
 * subrange/page inside the vma.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			unsigned long pfn, unsigned long size)
{
	int retval = 0;
	unsigned long i, j;
	resource_size_t base_paddr;
	resource_size_t paddr;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;

	if (!pat_enabled)
		return 0;

	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	/* reserve page by page using pfn and size */
	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
	for (i = 0; i < size; i += PAGE_SIZE) {
		paddr = base_paddr + i;
		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
		if (retval)
			goto cleanup_ret;
	}
	return 0;

cleanup_ret:
	/* Reserve error: Cleanup partial reservation and return error */
	for (j = 0; j < i; j += PAGE_SIZE) {
		paddr = base_paddr + j;
		free_pfn_range(paddr, PAGE_SIZE);
	}

	return retval;
}

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
	unsigned long i;
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_start = vma->vm_start;
	unsigned long vma_end = vma->vm_end;
	unsigned long vma_size = vma_end - vma_start;

	if (!pat_enabled)
		return;

	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		free_pfn_range(paddr, vma_size);
		return;
	}

	if (size != 0 && size != vma_size) {
		/* free page by page, using pfn and size */
		paddr = (resource_size_t)pfn << PAGE_SHIFT;
		/* free page i at base + i, not at an accumulating offset */
		for (i = 0; i < size; i += PAGE_SIZE)
			free_pfn_range(paddr + i, PAGE_SIZE);
	} else {
		/* free entire vma, page by page, using the pfn from pte */
		for (i = 0; i < vma_size; i += PAGE_SIZE) {
			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
				continue;

			free_pfn_range(paddr, PAGE_SIZE);
		}
	}
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
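
/*
 * Usage sketch (hypothetical driver): a frame buffer mmap handler would
 * typically do
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *
 * and get write-combining when PAT is enabled, falling back to an
 * uncached mapping otherwise.
 */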

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *list_node, *print_entry;
	int i = 1;

	print_entry  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	list_for_each_entry(list_node, &memtype_list, nd) {
		if (pos == i) {
			*print_entry = *list_node;
			spin_unlock(&memtype_lock);
			return print_entry;
		}
		++i;
	}
	spin_unlock(&memtype_lock);
	kfree(print_entry);

	return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
				NULL, &memtype_fops);
	return 0;
}

late_initcall(pat_memtype_list_init);
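
/*
 * Reading the file yields one line per tracked range, in list (start
 * address) order. Illustrative values:
 *
 *	# cat /sys/kernel/debug/x86/pat_memtype_list
 *	PAT memtype list:
 *	write-combining @ 0xd0000000-0xd0100000
 *	uncached-minus @ 0xfed00000-0xfed01000
 */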

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */