/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

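/* Check whether addr lies in the half-open interval [start, end). */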
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */
void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
}

static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

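	/*
	 * Also flush the caches: attribute changes can switch a mapping's
	 * memory type, so write back and invalidate any stale lines.
	 */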
	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

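/*
 * Flush the TLB (and, via flush_kernel_map(), the caches) on every CPU.
 * Must not be called with interrupts disabled or the IPI could deadlock.
 */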
static void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1MB needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is gone later on.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only */
	if (within(address, (unsigned long)__start_rodata,
				(unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}

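/*
 * Look up the kernel PTE for a virtual address and report the mapping
 * level through *level: for a large (PSE) mapping this returns the pmd
 * entry cast to pte_t * and sets PG_LEVEL_2M, otherwise it returns the
 * 4k PTE and sets PG_LEVEL_4K. Returns NULL if no mapping exists.
 */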
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}

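/*
 * Update an entry in init_mm's page tables and, on 32-bit kernels
 * without a shared kernel pmd, mirror the change into every pgd on
 * the pgd_list.
 */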
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

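/*
 * Split one large (2M/4M) kernel mapping into PTRS_PER_PTE 4k PTEs that
 * inherit the large page's protections, so that individual pages inside
 * it can then be given different attributes.
 */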
static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split-up page table. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable (see section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual).
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}

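/*
 * Change the attributes of a single kernel page at address/pfn. If the
 * page is still part of a large mapping it is split first, then the
 * lookup is retried on the resulting 4k PTE.
 */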
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = static_protections(prot, address);

	if (level == PG_LEVEL_4K) {
		WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
	} else {
		/* Clear the PSE bit for the 4k level pages ! */
		pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;

		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @prot:    New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */

static int change_page_attr_addr(unsigned long address, pgprot_t prot)
{
	int err = 0, kernel_map = 0;
	unsigned long pfn = __pa(address) >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
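	/*
	 * If the address lies in the high kernel text mapping, work on the
	 * lowmem alias instead and remember to fix up the kernel text
	 * mapping as well (done below).
	 */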
	if (address >= __START_KERNEL_map &&
			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	if (!kernel_map || pte_present(pfn_pte(0, prot))) {
		err = __change_page_attr(address, pfn, prot);
		if (err)
			return err;
	}

#ifdef CONFIG_X86_64
	/*
	 * Handle kernel mapping too which aliases part of
	 * lowmem:
	 */
	if (__pa(address) < KERNEL_TEXT_SIZE) {
		unsigned long addr2;
		pgprot_t prot2;

		addr2 = __START_KERNEL_map + __pa(address);
		/* Make sure the kernel mappings stay executable */
		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
		err = __change_page_attr(addr2, pfn, prot2);
	}
#endif

	return err;
}

/**
 * change_page_attr_set - Change page table attributes in the linear mapping.
 * @addr: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: Protection/caching type bits to set (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * This function is different from change_page_attr() in that only selected bits
 * are impacted, all other bits remain as is.
 */
static int change_page_attr_set(unsigned long addr, int numpages,
								pgprot_t prot)
{
	pgprot_t current_prot, new_prot;
	int level;
	pte_t *pte;
	int i, ret;

	for (i = 0; i < numpages; i++) {

		pte = lookup_address(addr, &level);
		if (!pte)
			return -EINVAL;

		current_prot = pte_pgprot(*pte);

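		/* OR the requested bits into the existing protections. */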
		pgprot_val(new_prot) =
			pgprot_val(current_prot) | pgprot_val(prot);

		ret = change_page_attr_addr(addr, new_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
	}
	return 0;
}

/**
 * change_page_attr_clear - Change page table attributes in the linear mapping.
 * @addr: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: Protection/caching type bits to clear (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * This function is different from change_page_attr() in that only selected bits
 * are impacted, all other bits remain as is.
 */
static int change_page_attr_clear(unsigned long addr, int numpages,
								pgprot_t prot)
{
	pgprot_t current_prot, new_prot;
	int level;
	pte_t *pte;
	int i, ret;

	for (i = 0; i < numpages; i++) {

		pte = lookup_address(addr, &level);
		if (!pte)
			return -EINVAL;

		current_prot = pte_pgprot(*pte);

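		/* Mask the requested bits out of the existing protections. */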
		pgprot_val(new_prot) =
				pgprot_val(current_prot) & ~pgprot_val(prot);

		ret = change_page_attr_addr(addr, new_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
	}
	return 0;
}

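/*
 * Convenience wrappers: each set_memory_*() call sets or clears the
 * relevant attribute bits on numpages pages starting at addr and then
 * does a global TLB (and cache) flush.
 */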
int set_memory_uc(unsigned long addr, int numpages)
{
	int err;

	err = change_page_attr_set(addr, numpages,
				__pgprot(_PAGE_PCD | _PAGE_PWT));
	global_flush_tlb();
	return err;
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	int err;

	err = change_page_attr_clear(addr, numpages,
				__pgprot(_PAGE_PCD | _PAGE_PWT));
	global_flush_tlb();
	return err;
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	int err;

	err = change_page_attr_clear(addr, numpages,
				__pgprot(_PAGE_NX));
	global_flush_tlb();
	return err;
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	int err;

	err = change_page_attr_set(addr, numpages,
				__pgprot(_PAGE_NX));
	global_flush_tlb();
	return err;
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	int err;

	err = change_page_attr_clear(addr, numpages,
				__pgprot(_PAGE_RW));
	global_flush_tlb();
	return err;
}

int set_memory_rw(unsigned long addr, int numpages)
{
	int err;

	err = change_page_attr_set(addr, numpages,
				__pgprot(_PAGE_RW));
	global_flush_tlb();
	return err;
}

int set_memory_np(unsigned long addr, int numpages)
{
	int err;

	err = change_page_attr_clear(addr, numpages,
				__pgprot(_PAGE_PRESENT));
	global_flush_tlb();
	return err;
}

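/*
 * The set_pages_*() variants take a struct page instead of a virtual
 * address; they operate on the page's linear-map address, so they are
 * only meaningful for pages that have one (see page_address()).
 */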
int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

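/*
 * Helpers for CONFIG_DEBUG_PAGEALLOC: pages are mapped present and
 * writable while allocated and marked not present when freed, so any
 * use after free faults immediately.
 */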
static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	return change_page_attr_set(addr, numpages,
				__pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored: the calls cannot fail, since
	 * large pages are disabled at boot time.
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all TLBs,
	 * but that can deadlock -> flush only the current CPU:
	 */
	__flush_tlb_all();
}
#endif

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif