/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
	(void)reason;
}
#endif


static int debug_enable;

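/* Boot parameter "debugpat" enables the dprintk() tracing used below. */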
static int __init pat_debug_setup(char *str)
{
	debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#define dprintk(fmt, arg...) \
	do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)


static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

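/* PAT(x, y) places memory type PAT_y into PAT MSR entry x (one byte per entry). */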
#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
	u64 pat;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			printk(KERN_ERR "PAT enabled, "
			       "but not supported by secondary CPU\n");
			BUG();
		}
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}

/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * The data structure is a list that is also organized as an rbtree
 * sorted on the start address of memtype range.
 *
 * memtype_lock protects both the linear list and rbtree.
 */

struct memtype {
	u64			start;
	u64			end;
	unsigned long		type;
	struct list_head	nd;
	struct rb_node		rb;
};

static struct rb_root memtype_rbroot = RB_ROOT;
static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */

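/*
 * Find the memtype entry with the given start address, or failing that,
 * the entry with the largest start address below it. Returns NULL when
 * every entry in the tree starts above 'start'.
 */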
static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
{
	struct rb_node *node = root->rb_node;
	struct memtype *last_lower = NULL;

	while (node) {
		struct memtype *data = container_of(node, struct memtype, rb);

		if (data->start < start) {
			last_lower = data;
			node = node->rb_right;
		} else if (data->start > start) {
			node = node->rb_left;
		} else
			return data;
	}

	/* Will return NULL if there is no entry with its start <= start */
	return last_lower;
}

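/*
 * Insert 'data' into the rbtree, keyed on its start address. Entries with
 * equal start addresses are allowed and are linked to the left.
 */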
static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;

	while (*new) {
		struct memtype *this = container_of(*new, struct memtype, rb);

		parent = *new;
		if (data->start <= this->start)
			new = &((*new)->rb_left);
		else if (data->start > this->start)
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->rb, parent, new);
	rb_insert_color(&data->rb, root);
}

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (The type encodings used by PAT and MTRR are not the same.)
 * The intersection is based on the "Effective Memory Type" tables in
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_UC_MINUS;

		return _PAGE_CACHE_WB;
	}

	return req_type;
}

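/*
 * Check whether 'new' is compatible with 'entry' and with any following
 * entries it overlaps on the sorted memtype list. If 'type' is non-NULL,
 * a differing type on the first entry is inherited instead of being
 * treated as a conflict.
 */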
static int
chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
{
	if (new->type != entry->type) {
		if (type) {
			new->type = entry->type;
			*type = entry->type;
		} else
			goto conflict;
	}

	 /* check overlaps with more than one entry in the list */
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (new->end <= entry->start)
			break;
		else if (new->type != entry->type)
			goto conflict;
	}
	return 0;

 conflict:
	printk(KERN_INFO "%s:%d conflicting memory types "
	       "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
	       new->end, cattr_name(new->type), cattr_name(entry->type));
	return -EBUSY;
}

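/*
 * Returns 1 if the whole range consists of (non-ISA) RAM pages, 0 if none
 * of it is tracked as RAM, and -1 if the range mixes the two, which PAT
 * cannot handle.
 */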
static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		/*
		 * For legacy reasons, physical address range in the legacy ISA
		 * region is tracked as non-RAM. This will allow users of
		 * /dev/mem to map portions of legacy ISA region, even when
		 * some of those portions are listed (or not even listed) with
		 * different e820 types (RAM/reserved/...)
		 */
		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
		    page_is_ram(page_nr))
			ram_page = 1;
		else
			not_rampage = 1;

		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}

/*
 * For RAM pages, we use page flags to mark the pages with appropriate type.
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for pages in the range.
 *
 * Caller must hold memtype_lock for atomicity.
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		unsigned long type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			printk(KERN_INFO "reserve_ram_pages_type failed "
				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
				start, end, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

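/* Clear the page-flag based memtype tracking for every page in the range. */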
static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}

/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type will have a special case value '-1', when the requester wants to
 * inherit the memory type from mtrr (if WB), existing PAT, defaulting to
 * UC_MINUS.
 *
 * If new_type is NULL, the function will return an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the function will return
 * the available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new, *entry;
	unsigned long actual_type;
	struct list_head *where;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == -1)
				*new_type = _PAGE_CACHE_WB;
			else if (req_type == _PAGE_CACHE_WC)
				*new_type = _PAGE_CACHE_UC_MINUS;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (is_ISA_range(start, end - 1)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		spin_lock(&memtype_lock);
		err = reserve_ram_pages_type(start, end, req_type, new_type);
		spin_unlock(&memtype_lock);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start	= start;
	new->end	= end;
	new->type	= actual_type;

	spin_lock(&memtype_lock);

	entry = memtype_rb_search(&memtype_rbroot, new->start);
	if (likely(entry != NULL)) {
		/* To work correctly with list_for_each_entry_continue */
		entry = list_entry(entry->nd.prev, struct memtype, nd);
	} else {
		entry = list_entry(&memtype_list, struct memtype, nd);
	}

	/* Search for existing mapping that overlaps the current range */
	where = NULL;
	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (end <= entry->start) {
			where = entry->nd.prev;
			break;
		} else if (start <= entry->start) { /* end > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				where = entry->nd.prev;
			}
			break;
		} else if (start < entry->end) { /* start > entry->start */
			err = chk_conflict(new, entry, new_type);
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);

				/*
				 * Move to right position in the linked
				 * list to add this new entry
				 */
				list_for_each_entry_continue(entry,
							&memtype_list, nd) {
					if (start <= entry->start) {
						where = entry->nd.prev;
						break;
					}
				}
			}
			break;
		}
	}

	if (err) {
		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
		       "track %s, req %s\n",
		       start, end, cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	if (where)
		list_add(&new->nd, where);
	else
		list_add_tail(&new->nd, &memtype_list);

	memtype_rb_insert(&memtype_rbroot, new);

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
		start, end, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

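/*
 * Undo a previous reserve_memtype(). RAM ranges are released through the
 * page flags; everything else is looked up and removed from the list and
 * the rbtree.
 */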
int free_memtype(u64 start, u64 end)
{
	struct memtype *entry, *saved_entry;
	int err = -EINVAL;
	int is_range_ram;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (is_ISA_range(start, end - 1))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		spin_lock(&memtype_lock);
		err = free_ram_pages_type(start, end);
		spin_unlock(&memtype_lock);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);

	entry = memtype_rb_search(&memtype_rbroot, start);
	if (unlikely(entry == NULL))
		goto unlock_ret;

	/*
	 * Saved entry points to an entry with start same or less than what
	 * we searched for. Now go through the list in both directions to look
	 * for the entry that matches with both start and end, with list stored
	 * in sorted start address
	 */
	saved_entry = entry;
	list_for_each_entry(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			rb_erase(&entry->rb, &memtype_rbroot);
			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		} else if (entry->start > start) {
			break;
		}
	}

	if (!err)
		goto unlock_ret;

	entry = saved_entry;
	list_for_each_entry_reverse(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
			rb_erase(&entry->rb, &memtype_rbroot);
			list_del(&entry->nd);
			kfree(entry);
			err = 0;
			break;
		} else if (entry->start < start) {
			break;
		}
	}
unlock_ret:
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
	}

	dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);

	return err;
}

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
	int rettype = _PAGE_CACHE_WB;
	struct memtype *entry;

	if (is_ISA_range(paddr, paddr + PAGE_SIZE - 1))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;
		spin_lock(&memtype_lock);
		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		spin_unlock(&memtype_lock);
		/*
		 * -1 from get_page_memtype() implies RAM page is in its
		 * default state and not reserved, and hence of type WB
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = memtype_rb_search(&memtype_rbroot, paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			unsigned long *type)
{
	unsigned long req_type = *type;
	unsigned long new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, end - start));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, end - start, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

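/*
 * The prot is left untouched here; the real /dev/mem policy is applied in
 * phys_mem_access_prot_allowed() below.
 */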
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	unsigned long flags = _PAGE_CACHE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC_MINUS;
	}

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}
/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
	unsigned long id_sz;

	if (base >= __pa(high_memory))
		return 0;

	id_sz = (__pa(high_memory) < base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
		printk(KERN_INFO
			"%s:%d ioremap_change_attr failed %s "
			"for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			base, (unsigned long long)(base + size));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only. After a successful reserve_memtype(), this
 * function also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() doesn't support RAM pages. Maintain the current
	 * behavior with RAM pages by returning success.
	 */
	if (is_ram != 0)
		return 0;

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for %Lx-%Lx, got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_vma_copy is called when a vma that covers a pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from pte and reserve the entire vma range with single reserve_pfn_range call.
 */
int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	/*
	 * For now, only handle remap_pfn_range() vmas where
	 * is_linear_pfn_mapping() == TRUE. Handling of
	 * vm_insert_pfn() is TBD.
	 */
	if (is_linear_pfn_mapping(vma)) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * track_pfn_vma_new is called when a _new_ pfn mapping is being established
 * for physical range indicated by pfn and size.
 *
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range reserve the entire vma range with
 * single reserve_pfn_range call.
 */
int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
			unsigned long pfn, unsigned long size)
{
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	/*
	 * For now, only handle remap_pfn_range() vmas where
	 * is_linear_pfn_mapping() == TRUE. Handling of
	 * vm_insert_pfn() is TBD.
	 */
	if (is_linear_pfn_mapping(vma)) {
		/* reserve the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		return reserve_pfn_range(paddr, vma_size, prot, 0);
	}

	return 0;
}

/*
 * untrack_pfn_vma is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case size can be zero).
 */
void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	/*
	 * For now, only handle remap_pfn_range() vmas where
	 * is_linear_pfn_mapping() == TRUE. Handling of
	 * vm_insert_pfn() is TBD.
	 */
	if (is_linear_pfn_mapping(vma)) {
		/* free the whole chunk starting from vm_pgoff */
		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
		free_pfn_range(paddr, vma_size);
		return;
	}
}

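/* Fall back to an uncached protection when PAT is not enabled. */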
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/* get Nth element of the linked list */
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *list_node, *print_entry;
	int i = 1;

	print_entry  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	list_for_each_entry(list_node, &memtype_list, nd) {
		if (pos == i) {
			*print_entry = *list_node;
			spin_unlock(&memtype_lock);
			return print_entry;
		}
		++i;
	}
	spin_unlock(&memtype_lock);
	kfree(print_entry);
	return NULL;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);
	return 0;
}

static struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir,
				NULL, &memtype_fops);
	return 0;
}

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */