/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

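/*
 * The CPA request descriptor: vaddr and numpages describe the virtual
 * range, mask_set and mask_clr the attribute bits to set and clear.
 * The lower levels update numpages (number of pages handled in one
 * step) and flushtlb (set once a TLB flush becomes necessary).
 */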
struct cpa_data {
	unsigned long	vaddr;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	int		numpages;
	int		flushtlb;
};

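/*
 * Result of try_preserve_large_page(): either the large page can be
 * preserved with just its protections changed (CPA_NO_SPLIT), or it
 * has to be split into 4k pages (CPA_SPLIT):
 */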
enum {
	CPA_NO_SPLIT = 0,
	CPA_SPLIT,
};

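/* Test whether addr lies in the half-open interval [start, end): */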
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	void *vend = vaddr + size - 1;
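	/*
	 * Note: vend points at the last byte of the range, so the
	 * final clflush below stays inside the range:
	 */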

	mb();

	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
		clflush(vaddr);
	/*
	 * Flush any possible final partial cacheline:
	 */
	clflush(vend);

	mb();
}

static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86_model >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
}

static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1, 1);

	if (!cache)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && pte_present(*pte))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}

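/*
 * On 64-bit, the kernel text is mapped a second time at
 * __START_KERNEL_map (the "high mapping"), in addition to the
 * direct mapping:
 */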
#define HIGH_MAP_START	__START_KERNEL_map
#define HIGH_MAP_END	(__START_KERNEL_map + KERNEL_TEXT_SIZE)

/*
 * Convert a virtual address in the linear mapping to its x86-64
 * highmap alias. On 32-bit there is no highmap, so this is a no-op.
 */
static unsigned long virt_to_highmap(void *address)
{
#ifdef CONFIG_X86_64
	return __pa((unsigned long)address) + HIGH_MAP_START - phys_base;
#else
	return (unsigned long)address;
#endif
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is gone after init.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;
	/*
	 * Do the same for the x86-64 high kernel mapping
	 */
	if (within(address, virt_to_highmap(_text), virt_to_highmap(_etext)))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only */
	if (within(address, (unsigned long)__start_rodata,
				(unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
	/*
	 * Do the same for the x86-64 high kernel mapping
	 */
	if (within(address, virt_to_highmap(__start_rodata),
				virt_to_highmap(__end_rodata)))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}
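
/*
 * Usage sketch (hypothetical caller): check how an address is mapped
 * before deciding whether a large page must be split:
 *
 *	int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && level != PG_LEVEL_4K)
 *		... addr is covered by a large page ...
 */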

static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
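		/*
		 * Without a shared kernel pmd, each pgd on pgd_list has
		 * its own copy of the kernel mappings, so the change
		 * must be replicated into every one of them:
		 */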
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static int try_preserve_large_page(pte_t *kpte, unsigned long address,
				   struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, flags;
	pte_t new_pte, old_pte, *tmp;
	pgprot_t old_prot, new_prot;
	int level, res = CPA_SPLIT;

	/*
	 * An Athlon 64 X2 showed hard hangs if we tried to preserve
	 * largepages and changed the PSE entry from RW to RO.
	 *
	 * As AMD CPUs have a long series of errata in this area,
	 * (and none of the known ones seem to explain this hang),
	 * disable this code until the hang can be debugged:
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return res;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	switch (level) {
	case PG_LEVEL_2M:
		psize = PMD_PAGE_SIZE;
		pmask = PMD_PAGE_MASK;
		break;
	case PG_LEVEL_1G:
	default:
		res = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
	nextpage_addr = (address + psize) & pmask;
	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
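	/* e.g. 8K into a 2M page: numpages = (2M - 8K) >> PAGE_SHIFT = 510 */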
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 */
	old_pte = *kpte;
	old_prot = new_prot = pte_pgprot(old_pte);

	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
	new_prot = static_protections(new_prot, address);

	/*
	 * If there are no changes, return. cpa->numpages has been
	 * updated above:
	 */
	if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
		res = CPA_NO_SPLIT;
		goto out_unlock;
	}

	/*
	 * We need to change the attributes. Check, whether we can
	 * change the large page in one go. We request a split, when
	 * the address is not aligned and the number of pages is
	 * smaller than the number of pages in the large page. Note
	 * that we limited the number of possible pages already to
	 * the number of pages in the large page.
	 */
	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
		/*
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
		__set_pmd_pte(kpte, address, new_pte);
		cpa->flushtlb = 1;
		res = CPA_NO_SPLIT;
	}

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);
	return res;
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags, addr, pfn;
	pte_t *pbase, *tmp;
	struct page *base;
	unsigned int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & PMD_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pte_pfn(*kpte);
	for (i = 0; i < PTRS_PER_PTE; i++, pfn++)
		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

	/*
	 * Install the new, split up pagetable. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}

static int __change_page_attr(unsigned long address, struct cpa_data *cpa)
{
	struct page *kpte_page;
	int level, res;
	pte_t *kpte;

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	if (level == PG_LEVEL_4K) {
		pte_t new_pte, old_pte = *kpte;
		pgprot_t new_prot = pte_pgprot(old_pte);

		if (!pte_val(old_pte)) {
			printk(KERN_WARNING "CPA: called for zero pte. "
			       "vaddr = %lx cpa->vaddr = %lx\n", address,
				cpa->vaddr);
			WARN_ON(1);
			return -EINVAL;
		}

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		new_prot = static_protections(new_prot, address);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes,
		 * not the memory it points to.
		 */
		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));

		/*
		 * Do we really change anything ?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
			cpa->flushtlb = 1;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check, whether we can keep the large page intact
	 * and just change the pte:
	 */
	res = try_preserve_large_page(kpte, address, cpa);
	if (res < 0)
		return res;

	/*
	 * When the range fits into the existing large page,
	 * return. cpa->numpages and cpa->flushtlb have been updated in
	 * try_preserve_large_page():
	 */
	if (res == CPA_NO_SPLIT)
		return 0;

	/*
	 * We have to split the large page:
	 */
	res = split_large_page(kpte, address);
	if (res)
		return res;
	cpa->flushtlb = 1;
	goto repeat;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @cpa: cpa_data descriptor holding the virtual address, the number of
 *	 pages and the attribute masks to set and clear
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
static int change_page_attr_addr(struct cpa_data *cpa)
{
	int err;
	unsigned long address = cpa->vaddr;

#ifdef CONFIG_X86_64
	unsigned long phys_addr = __pa(address);

	/*
	 * If we are inside the high mapped kernel range, then we
	 * fixup the low mapping first. __va() returns the virtual
	 * address in the linear mapping:
	 */
	if (within(address, HIGH_MAP_START, HIGH_MAP_END))
		address = (unsigned long) __va(phys_addr);
#endif

	err = __change_page_attr(address, cpa);
	if (err)
		return err;

#ifdef CONFIG_X86_64
	/*
	 * If the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (within(phys_addr, 0, KERNEL_TEXT_SIZE)) {
		/*
		 * Calc the high mapping address. See __phys_addr()
		 * for the non-obvious details.
		 *
		 * Note that NX and other required permissions are
		 * checked in static_protections().
		 */
		address = phys_addr + HIGH_MAP_START - phys_base;

		/*
		 * Our high aliases are imprecise, because we check
		 * everything between 0 and KERNEL_TEXT_SIZE, so do
		 * not propagate lookup failures back to users:
		 */
		__change_page_attr(address, cpa);
	}
#endif
	return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa)
{
	int ret, numpages = cpa->numpages;

	while (numpages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = numpages;
		ret = change_page_attr_addr(cpa);
		if (ret)
			return ret;

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > numpages);
		numpages -= cpa->numpages;
		cpa->vaddr += cpa->numpages * PAGE_SIZE;
	}
	return 0;
}

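/*
 * Does the mask touch any of the caching attribute bits? Only then do
 * we need to flush caches as well as TLBs:
 */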
static inline int cache_attr(pgprot_t attr)
{
	return pgprot_val(attr) &
		(_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	struct cpa_data cpa;
	int ret, cache;

	/*
	 * Check, if we are requested to change a not supported
	 * feature:
	 */
	mask_set = canon_pgprot(mask_set);
	mask_clr = canon_pgprot(mask_clr);
	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
		return 0;

	cpa.vaddr = addr;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flushtlb = 0;

	ret = __change_page_attr_set_clr(&cpa);

	/*
	 * Check whether we really changed something:
	 */
	if (!cpa.flushtlb)
		return ret;

	/*
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
	cache = cache_attr(mask_set);

	/*
	 * On success we use clflush, when the CPU supports it to
	 * avoid the wbinvd. If the CPU does not support it, and in the
	 * error case, we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages, cache);
	else
		cpa_flush_all(cache);

	return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

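/*
 * Usage sketch (buf and nrpages are hypothetical): make a kernel
 * buffer uncacheable for device access, then restore write-back
 * caching when done:
 *
 *	set_memory_uc((unsigned long)buf, nrpages);
 *	... let the device access buf ...
 *	set_memory_wb((unsigned long)buf, nrpages);
 */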
int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0)};

	return __change_page_attr_set_clr(&cpa);
}

static int __set_pages_np(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};

	return __change_page_attr_set_clr(&cpa);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock, so we flush only the current cpu:
	 */
	__flush_tlb_all();
}
#endif

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif