/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */


/**
 * clflush_cache_range - flush a cache range with clflush
 * @addr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *addr, int size)
{
	int i;

	mb();
	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
	mb();
}
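/*
 * Usage sketch (hypothetical caller, not from this file): flush the
 * cache lines backing a buffer after writing it, e.g. before a device
 * reads it through an uncached alias:
 *
 *	memcpy(vaddr, data, len);
 *	clflush_cache_range(vaddr, len);
 */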

static void __cpa_flush_all(void *arg)
{
	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

static void cpa_flush_all(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, NULL, 1, 1);
}

struct clflush_data {
	unsigned long addr;
	int numpages;
};

static void __cpa_flush_range(void *arg)
{
	struct clflush_data *cld = arg;

	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();

	clflush_cache_range((void *) cld->addr, cld->numpages * PAGE_SIZE);
}

static void cpa_flush_range(unsigned long addr, int numpages)
{
	struct clflush_data cld;

	BUG_ON(irqs_disabled());

	cld.addr = addr;
	cld.numpages = numpages;

	on_each_cpu(__cpa_flush_range, &cld, 1, 1);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons
	 * Does not cover __inittext since that is gone later on
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only */
	if (within(address, (unsigned long)__start_rodata,
				(unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
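/*
 * Example of the effect: a request to map a page in the 640k-1Mb BIOS
 * window non-executable has _PAGE_NX filtered out above, so PCI BIOS
 * based config access keeps working.
 */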

pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}
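/*
 * Example (hypothetical caller): find the pte backing a kernel address
 * and check at which level it is mapped before touching it:
 *
 *	int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && level == PG_LEVEL_2M)
 *		... addr is covered by a large (2M/4M) pmd entry ...
 *	else if (pte && level == PG_LEVEL_4K)
 *		... addr is mapped by a regular 4k pte ...
 */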

static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}
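/*
 * Note on the !SHARED_KERNEL_PMD branch above: when kernel pmds are not
 * shared between pgds, the pmd level entry is replicated into every
 * process' page tables, so the update has to be written into each pgd
 * on the pgd_list and not just into init_mm.
 */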

static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}
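/*
 * Worked example (assuming a 64-bit kernel with 2M large pages): a CPA
 * call on a single 4k page inside a 2M kernel mapping lands here. The
 * large pmd entry is replaced by a pointer to a freshly allocated page
 * of 512 4k ptes which reproduce the old mapping; the caller's retry
 * via lookup_address() then sees PG_LEVEL_4K and modifies just the one
 * pte in question.
 */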

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = static_protections(prot, address);

	if (level == PG_LEVEL_4K) {
		WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
	} else {
		/* Clear the PSE bit for the 4k level pages! */
		pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;

		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @prot:    New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * a mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */

static int change_page_attr_addr(unsigned long address, pgprot_t prot)
{
	int err = 0, kernel_map = 0;
	unsigned long pfn = __pa(address) >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (address >= __START_KERNEL_map &&
			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	if (!kernel_map || pte_present(pfn_pte(0, prot))) {
		err = __change_page_attr(address, pfn, prot);
		if (err)
			return err;
	}

#ifdef CONFIG_X86_64
	/*
	 * Handle kernel mapping too which aliases part of
	 * lowmem:
	 */
	if (__pa(address) < KERNEL_TEXT_SIZE) {
		unsigned long addr2;
		pgprot_t prot2;

		addr2 = __START_KERNEL_map + __pa(address);
		/* Make sure the kernel mappings stay executable */
		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
		err = __change_page_attr(addr2, pfn, prot2);
	}
#endif

	return err;
}

static int __change_page_attr_set_clr(unsigned long addr, int numpages,
				      pgprot_t mask_set, pgprot_t mask_clr)
{
	pgprot_t new_prot;
	int level;
	pte_t *pte;
	int i, ret;

	for (i = 0; i < numpages ; i++) {

		pte = lookup_address(addr, &level);
		if (!pte)
			return -EINVAL;

		new_prot = pte_pgprot(*pte);

		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
		pgprot_val(new_prot) |= pgprot_val(mask_set);

		ret = change_page_attr_addr(addr, new_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
	}

	return 0;
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
					     mask_clr);

	/*
	 * On success we use clflush, when the CPU supports it, to
	 * avoid the wbinvd. If the CPU does not support it, and in
	 * the error case, we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages);
	else
		cpa_flush_all();

	return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);
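/*
 * Usage sketch (hypothetical driver, not from this file): make a buffer
 * uncacheable while a device owns it, then restore write-back caching:
 *
 *	set_memory_uc((unsigned long)buf, nrpages);
 *	... device access ...
 *	set_memory_wb((unsigned long)buf, nrpages);
 */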

int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}


#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
static inline int __change_page_attr_set(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int __change_page_attr_clear(unsigned long addr, int numpages,
					   pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_set(addr, numpages,
				      __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_clear(addr, numpages,
					__pgprot(_PAGE_PRESENT));
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * since large pages are disabled at boot time:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all tlbs, but that
	 * can deadlock, so flush only the current cpu:
	 */
	__flush_tlb_all();
}
#endif

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif