/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	void *vend = vaddr + size - 1;

	mb();

	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
		clflush(vaddr);
	/*
	 * Flush any possible final partial cacheline:
	 */
	clflush(vend);

	mb();
}
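
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * that produces data and pushes it out to memory for a non-coherent
 * observer. The helper name and the one-byte flag are hypothetical;
 * the point is the write followed by clflush_cache_range().
 */
static void __maybe_unused example_publish_flag(unsigned char *flag)
{
	*flag = 1;					/* produce the data ... */
	clflush_cache_range(flag, sizeof(*flag));	/* ... and force it out */
}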

static void __cpa_flush_all(void *arg)
{
	/*
	 * Flush everything to work around an erratum in early Athlons
	 * regarding large-page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

static void cpa_flush_all(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, NULL, 1, 1);
}

static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize this further and do individual per-page TLB
	 * invalidates for a small number of pages. Caveat: we must also
	 * flush the high aliases on 64-bit.
	 */
	__flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages)
{
	unsigned int i;
	int level;
	unsigned long addr;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1, 1);

	/*
	 * We only need to flush on one CPU: clflush is a MESI-coherent
	 * instruction that will cause all other CPUs to flush the same
	 * cache lines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && pte_present(*pte))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is freed after init.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only */
	if (within(address, (unsigned long)__start_rodata,
				(unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
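
/*
 * Illustrative sketch (not part of the original file): what a caller
 * effectively gets back when it requests a non-executable mapping of an
 * address inside the kernel text - static_protections() drops the NX
 * bit from the request, since the text must stay executable. The helper
 * name is hypothetical.
 */
static pgprot_t __maybe_unused example_filtered_prot(unsigned long addr)
{
	/* For addr within [_text, _etext) the _PAGE_NX bit is filtered out. */
	return static_protections(PAGE_KERNEL, addr);
}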

pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}
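
/*
 * Illustrative sketch (not part of the original file): probing how a
 * kernel-virtual address is currently mapped. A NULL return means no
 * mapping at all; otherwise *level says whether the entry is a
 * large-page PMD or a regular 4k PTE. The helper name is hypothetical.
 */
static int __maybe_unused example_mapped_as_large_page(unsigned long addr)
{
	int level;
	pte_t *pte = lookup_address(addr, &level);

	return pte && pte_present(*pte) && level == PG_LEVEL_2M;
}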

static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split-up page table. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable (see section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual).
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = static_protections(prot, address);

	if (level == PG_LEVEL_4K) {
		WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
	} else {
		/* Clear the PSE bit for the 4k level pages! */
		pgprot_val(prot) &= ~_PAGE_PSE;

		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @prot:    New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * a mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */

static int change_page_attr_addr(unsigned long address, pgprot_t prot)
{
	int err = 0, kernel_map = 0;
	unsigned long pfn = __pa(address) >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (address >= __START_KERNEL_map &&
			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	if (!kernel_map || pte_present(pfn_pte(0, prot))) {
		err = __change_page_attr(address, pfn, prot);
		if (err)
			return err;
	}

#ifdef CONFIG_X86_64
	/*
	 * Handle kernel mapping too which aliases part of
	 * lowmem:
	 */
	if (__pa(address) < KERNEL_TEXT_SIZE) {
		unsigned long addr2;
		pgprot_t prot2;

		addr2 = __START_KERNEL_map + __pa(address);
		/* Make sure the kernel mappings stay executable */
		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
		err = __change_page_attr(addr2, pfn, prot2);
	}
#endif

	return err;
}

static int __change_page_attr_set_clr(unsigned long addr, int numpages,
				      pgprot_t mask_set, pgprot_t mask_clr)
{
	pgprot_t new_prot;
	int level;
	pte_t *pte;
	int i, ret;

	for (i = 0; i < numpages ; i++) {

		pte = lookup_address(addr, &level);
		if (!pte)
			return -EINVAL;

		new_prot = pte_pgprot(*pte);

		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
		pgprot_val(new_prot) |= pgprot_val(mask_set);

		ret = change_page_attr_addr(addr, new_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
	}

	return 0;
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
					     mask_clr);

	/*
	 * On success we use clflush, when the CPU supports it, to
	 * avoid the wbinvd. If the CPU does not support clflush, and in
	 * the error case, we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages);
	else
		cpa_flush_all();

	return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);
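
/*
 * Illustrative sketch (not part of the original file): the intended
 * pairing of set_memory_uc()/set_memory_wb() around a phase in which a
 * page-aligned, direct-mapped buffer must not be cached. The buffer and
 * the elided I/O step are hypothetical.
 */
static int __maybe_unused example_uncached_phase(void *buf, int numpages)
{
	unsigned long addr = (unsigned long)buf;
	int err;

	err = set_memory_uc(addr, numpages);	/* mark uncacheable */
	if (err)
		return err;
	/* ... the uncached I/O on buf would happen here ... */
	return set_memory_wb(addr, numpages);	/* restore write-back */
}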

int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}
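
/*
 * Illustrative sketch (not part of the original file): write-protecting
 * a page-aligned table once it has been filled in, and lifting the
 * protection again only for a (hypothetical) late update.
 */
static void __maybe_unused example_seal_table(unsigned long addr, int numpages)
{
	set_memory_ro(addr, numpages);	/* no more stray writes */
	/* ... later, if an update really is unavoidable: */
	set_memory_rw(addr, numpages);
}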

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}
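
/*
 * Illustrative sketch (not part of the original file): the set_pages_*
 * variants take a struct page instead of a virtual address, which is
 * convenient straight after alloc_pages(). Error handling is elided and
 * the helper name is hypothetical.
 */
static __maybe_unused struct page *example_alloc_ro_page(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);

	if (page)
		set_pages_ro(page, 1);
	return page;
}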

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
static inline int __change_page_attr_set(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int __change_page_attr_clear(unsigned long addr, int numpages,
					   pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_set(addr, numpages,
				      __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_clear(addr, numpages,
					__pgprot(_PAGE_PRESENT));
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet, do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail, since
	 * large pages are disabled at boot time:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all TLBs,
	 * but that can deadlock, so flush only the current CPU:
	 */
	__flush_tlb_all();
}
#endif

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif