/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

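/* Return true if addr lies in the half-open range [start, end). */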
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @addr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *addr, int size)
{
	int i;

	mb();
	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
	mb();
}

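/* IPI worker for cpa_flush_all(); the on_each_cpu() argument is unused. */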
static void __cpa_flush_all(void *arg)
{
	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

static void cpa_flush_all(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, NULL, 1, 1);
}

static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize this further and do individual per-page
	 * TLB invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64-bit as well.
	 */
	__flush_tlb_all();
}

static void cpa_flush_range(unsigned long addr, int numpages)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_range, NULL, 1, 1);

	/*
	 * We only need to flush on one CPU:
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	clflush_cache_range((void *) addr, numpages * PAGE_SIZE);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is gone later on.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only */
	if (within(address, (unsigned long)__start_rodata,
				(unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}

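/*
 * Look up the kernel page table entry for a virtual address. Returns a
 * pointer to the pmd entry for large pages, or to the pte for 4k pages,
 * and reports the mapping level via *level.
 */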
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}

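/*
 * Set the pmd/pte entry in init_mm and, on 32-bit kernels that do not
 * share the kernel pmd, replicate the change into every pgd on pgd_list.
 */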
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

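/*
 * Split a large kernel mapping into 4k pages: allocate a new page table,
 * fill it with 4k entries covering the same range and install it under
 * pgd_lock, rechecking that no other CPU has split the page meanwhile.
 */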
static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split-up page table. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable (see section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual).
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}

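/*
 * Change the attributes of a single page in the kernel page tables. If
 * the page is still part of a large mapping, split it up first and retry.
 */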
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = static_protections(prot, address);

	if (level == PG_LEVEL_4K) {
		WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
	} else {
		/* Clear the PSE bit for the 4k level pages ! */
		pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;

		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @prot:    New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */

static int change_page_attr_addr(unsigned long address, pgprot_t prot)
{
	int err = 0, kernel_map = 0;
	unsigned long pfn = __pa(address) >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (address >= __START_KERNEL_map &&
			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	if (!kernel_map || pte_present(pfn_pte(0, prot))) {
		err = __change_page_attr(address, pfn, prot);
		if (err)
			return err;
	}

#ifdef CONFIG_X86_64
	/*
	 * Handle kernel mapping too which aliases part of
	 * lowmem:
	 */
	if (__pa(address) < KERNEL_TEXT_SIZE) {
		unsigned long addr2;
		pgprot_t prot2;

		addr2 = __START_KERNEL_map + __pa(address);
		/* Make sure the kernel mappings stay executable */
		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
		err = __change_page_attr(addr2, pfn, prot2);
	}
#endif

	return err;
}

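/*
 * Walk the range page by page: take the current protections of each page,
 * clear the bits in mask_clr, set the bits in mask_set and apply the
 * result. The caller is responsible for the TLB/cache flushes.
 */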
static int __change_page_attr_set_clr(unsigned long addr, int numpages,
				      pgprot_t mask_set, pgprot_t mask_clr)
{
	pgprot_t new_prot;
	int level;
	pte_t *pte;
	int i, ret;

	for (i = 0; i < numpages; i++) {

		pte = lookup_address(addr, &level);
		if (!pte)
			return -EINVAL;

		new_prot = pte_pgprot(*pte);

		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
		pgprot_val(new_prot) |= pgprot_val(mask_set);

		ret = change_page_attr_addr(addr, new_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
	}

	return 0;
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
					     mask_clr);

	/*
	 * On success we use clflush, when the CPU supports it, to
	 * avoid the wbinvd. If the CPU does not support it, and in
	 * the error case, we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages);
	else
		cpa_flush_all();

	return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

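/*
 * The set_memory_* API below changes the attributes of a range given by
 * a virtual address and a number of pages, including the necessary
 * TLB/cache flushes.
 */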
int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

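/*
 * The set_pages_* variants take a struct page and operate on its address
 * in the kernel linear mapping.
 */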
int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
static inline int __change_page_attr_set(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int __change_page_attr_clear(unsigned long addr, int numpages,
					   pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC

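/*
 * The helpers below set or clear _PAGE_PRESENT without flushing TLBs or
 * caches; kernel_map_pages() does its own local TLB flush.
 */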
static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_set(addr, numpages,
				      __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_clear(addr, numpages,
					__pgprot(_PAGE_PRESENT));
}

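/*
 * Map the given pages on allocation and unmap them on free, so that
 * stray accesses to freed pages fault immediately (CONFIG_DEBUG_PAGEALLOC).
 */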
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet, do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail, since
	 * large pages are disabled at boot time:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all TLBs,
	 * but that can deadlock, so we flush only the current CPU:
	 */
	__flush_tlb_all();
}
#endif

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif