/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written and signed off for the Linux kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 *		       Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>

#undef pr_fmt
#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif

static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	/* Assume mode is auto unless overridden. */
	pti_mode = PTI_AUTO;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret > 0)  {
		if (ret == 3 && !strncmp(arg, "off", 3)) {
			pti_mode = PTI_FORCE_OFF;
			pti_print_if_insecure("disabled on command line.");
			return;
		}
		if (ret == 2 && !strncmp(arg, "on", 2)) {
			pti_mode = PTI_FORCE_ON;
			pti_print_if_secure("force enabled on command line.");
			goto enable;
		}
		if (ret == 4 && !strncmp(arg, "auto", 4)) {
			pti_mode = PTI_AUTO;
			goto autosel;
		}
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti")) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}
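
/*
 * Summary of the command-line interface parsed above:
 *
 *	pti=on    force-enable PTI, even on CPUs without the bug
 *	pti=off   force-disable PTI
 *	pti=auto  enable PTI only if X86_BUG_CPU_MELTDOWN is set (default)
 *	nopti     equivalent to pti=off
 */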

pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set.  This could be an executable
	 *     EFI runtime mapping or something similar, and the kernel
	 *     may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}
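
/*
 * Note: in this PTI scheme the kernel and user PGDs live in a single
 * order-1 (8k) allocation, and kernel_to_user_pgdp() finds the user copy
 * by setting one bit in the kernel PGD's address; the entry code flips
 * the same bit in CR3 to switch page tables.  A minimal sketch, assuming
 * the PTI_PGTABLE_SWITCH_BIT definition that accompanies these helpers:
 *
 *	user_pgdp = (pgd_t *)((unsigned long)kernel_pgdp |
 *			      (1UL << PTI_PGTABLE_SWITCH_BIT));
 */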

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
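	/*
	 * When the p4d level is folded (4-level paging), pgd_none() is
	 * always false, so the branch above never allocates; on 4-level
	 * kernels the first user-pagetable page is allocated at the p4d
	 * step in pti_user_pagetable_walk_pmd().
	 */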
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}
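
/*
 * Both walkers above operate on the user copy of the page tables:
 * pti_user_pagetable_walk_p4d() starts from
 * kernel_to_user_pgdp(pgd_offset_k(address)), so the pointers returned
 * (and the pages allocated) belong to the user PGD tree.
 */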

#ifdef CONFIG_X86_VSYSCALL_EMULATION
/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif
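
/*
 * The vsyscall page is the one kernel-provided mapping that legacy
 * userspace expects to find at a fixed address, so its PTE is copied
 * into the user tables and set_vsyscall_pgtable_user_bits() then sets
 * _PAGE_USER on the page-table hierarchy covering it.
 */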

static void
pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;
		pud = pud_offset(p4d, addr);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			continue;

		target_pmd = pti_user_pagetable_walk_pmd(addr);
		if (WARN_ON(!target_pmd))
			return;

		/*
		 * Only clone present PMDs.  This ensures only setting
		 * _PAGE_GLOBAL on present PMDs.  This should only be
		 * called on well-known addresses anyway, so a non-
		 * present PMD would be a surprise.
		 */
		if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
			return;

		/*
		 * Setting 'target_pmd' below creates a mapping in both
		 * the user and kernel page tables.  It is effectively
		 * global, so set it as global in both copies.  Note:
		 * the X86_FEATURE_PGE check is not _required_ because
		 * the CPU ignores _PAGE_GLOBAL when PGE is not
		 * supported.  The check keeps consistency with
		 * code that only sets this bit when supported.
		 */
		if (boot_cpu_has(X86_FEATURE_PGE))
			*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

		/*
		 * Copy the PMD.  That is, the kernelmode and usermode
		 * tables will share the last-level page tables of this
		 * address range.
		 */
		*target_pmd = pmd_clear_flags(*pmd, clear);
	}
}
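
/*
 * Callers in this file use the 'clear' argument to strip permission bits
 * while cloning: pti_clone_entry_text() and pti_clone_kernel_text() pass
 * _PAGE_RW so the shared ranges are read-only in the user tables, while
 * the 32-bit pti_clone_user_shared() passes 0 to clone the CPU entry
 * area unchanged.
 */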

#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA into the user space visible page table.
 */
static void __init pti_clone_user_shared(void)
{
	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
}

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end   = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pmds(start, end, 0);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry and irqentry text and force it RO.
 */
static void pti_clone_entry_text(void)
{
	pti_clone_pmds((unsigned long) __entry_text_start,
			(unsigned long) __irqentry_text_end,
		       _PAGE_RW);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto.  Do the most
	 * secure thing (not global) if pti=on is specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages.  Do the safe thing (disable
	 * global kernel image).  This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures.  Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
		return false;

	return true;
}

/*
 * This is the only user for these and it is not arch-generic
 * like the other set_memory.h functions.  Just extern them.
 */
extern int set_memory_nonglobal(unsigned long addr, int numpages);
extern int set_memory_global(unsigned long addr, int numpages);

/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web.  But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pmds(start, end_clone, _PAGE_RW);

	/*
	 * pti_clone_pmds() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}
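
/*
 * Note the two distinct end points above: the clone extends to
 * __end_rodata_aligned, while _PAGE_GLOBAL is only set again up to
 * __stop___ex_table, i.e. the normal non-__init kernel text.
 */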

void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel.  We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init-code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check with cpuid directly again.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();
	/* Replace some of the global bits just for shared entry text: */
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX.  These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	/*
	 * We need to clone everything (again) that maps parts of the
	 * kernel image.
	 */
	pti_clone_entry_text();
	pti_clone_kernel_text();
}
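
/*
 * pti_finalize() is expected to be invoked once from the generic boot
 * code, after free_initmem() and mark_readonly() have established the
 * final kernel image permissions.
 */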