// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
 * writes to SPEC_CTRL contain whatever reserved bits have been set.
 */
u64 __ro_after_init x86_spec_ctrl_base;

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_rds_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_rds_mask;

void __init check_bugs(void)
{
	identify_boot_cpu();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for an i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages.
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slowdowns.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * to be very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;

void x86_spec_ctrl_set(u64 val)
{
	if (val & x86_spec_ctrl_mask)
		WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
	else
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);

u64 x86_spec_ctrl_get_default(void)
{
	u64 msrval = x86_spec_ctrl_base;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
	return msrval;
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);

void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
{
	u64 host = x86_spec_ctrl_base;

	if (!boot_cpu_has(X86_FEATURE_IBRS))
		return;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);

	if (host != guest_spec_ctrl)
		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);

void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
{
	u64 host = x86_spec_ctrl_base;

	if (!boot_cpu_has(X86_FEATURE_IBRS))
		return;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);

	if (host != guest_spec_ctrl)
		wrmsrl(MSR_IA32_SPEC_CTRL, host);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
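
/*
 * Illustrative call pattern (a sketch, not code from this file): a
 * hypervisor such as KVM is expected to bracket guest entry/exit with
 * the pair above, roughly:
 *
 *	x86_spec_ctrl_set_guest(vcpu_spec_ctrl);
 *	... enter and run the guest ...
 *	x86_spec_ctrl_restore_host(vcpu_spec_ctrl);
 *
 * where vcpu_spec_ctrl stands in for the guest's shadowed
 * MSR_IA32_SPEC_CTRL value; the variable name here is hypothetical.
 */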

static void x86_amd_rds_enable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_rds_mask;

	if (boot_cpu_has(X86_FEATURE_AMD_RDS))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s selected on command line.\n", reason);
}

static inline bool retp_compiler(void)
{
	return __is_defined(RETPOLINE);
}

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
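
/*
 * Example (illustrative): with arg = "retpoline,amd" and arglen as
 * returned by cmdline_find_option(), match_option(arg, arglen,
 * "retpoline,amd") is true only for an exact, full-length match.
 */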

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] = {
	{ "off",               SPECTRE_V2_CMD_NONE,              false },
	{ "on",                SPECTRE_V2_CMD_FORCE,             true },
	{ "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
	{ "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
	{ "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "auto",              SPECTRE_V2_CMD_AUTO,              false },
};
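
/*
 * Illustrative boot-time usage: "spectre_v2=retpoline,generic" on the
 * kernel command line selects SPECTRE_V2_CMD_RETPOLINE_GENERIC, while
 * "nospectre_v2" (handled in spectre_v2_parse_cmdline() below) disables
 * the mitigation entirely.
 */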

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	char arg[20];
	int ret, i;
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
		return SPECTRE_V2_CMD_NONE;
	else {
		ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
		if (ret < 0)
			return SPECTRE_V2_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
			if (!match_option(arg, ret, mitigation_options[i].option))
				continue;
			cmd = mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPECTRE_V2_CMD_AUTO;
		}
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
		return SPECTRE_V2_CMD_AUTO;
	}

	if (mitigation_options[i].secure)
		spec2_print_if_secure(mitigation_options[i].option);
	else
		spec2_print_if_insecure(mitigation_options[i].option);

	return cmd;
295 296
}

/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6) {
		switch (boot_cpu_data.x86_model) {
		case INTEL_FAM6_SKYLAKE_MOBILE:
		case INTEL_FAM6_SKYLAKE_DESKTOP:
		case INTEL_FAM6_SKYLAKE_X:
		case INTEL_FAM6_KABYLAKE_MOBILE:
		case INTEL_FAM6_KABYLAKE_DESKTOP:
			return true;
		}
	}
	return false;
}

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
					 SPECTRE_V2_RETPOLINE_MINIMAL;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If neither SMEP nor PTI are available, there is a risk of
	 * hitting userspace addresses in the RSB after a context switch
	 * from a shallow call stack to a deeper one. To prevent this fill
	 * the entire RSB, even when using IBRS.
	 *
	 * Skylake era CPUs have a separate issue with *underflow* of the
	 * RSB, when they will predict 'ret' targets from the generic BTB.
	 * The proper mitigation for this is IBRS. If IBRS is not supported
	 * or deactivated in favour of retpolines the RSB fill on context
	 * switch is required.
	 */
	if ((!boot_cpu_has(X86_FEATURE_PTI) &&
	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
		pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
	}

	/* Initialize Indirect Branch Prediction Barrier if supported */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
		pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
	}

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. But firmware isn't, so use IBRS to protect that.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
};

static const char *ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl"
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },  /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },    /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },  /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
};
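
/*
 * Illustrative boot-time usage: "spec_store_bypass_disable=prctl" on the
 * kernel command line selects SPEC_STORE_BYPASS_CMD_PRCTL, leaving the
 * mitigation off by default but available per task via prctl().
 */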

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

static enum ssb_mitigation_cmd __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_RDS))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
		/* Choose prctl as the default mode */
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_RDS - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
		 * a completely different MSR and bit dependent on family.
		 */
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_INTEL:
			x86_spec_ctrl_base |= SPEC_CTRL_RDS;
			x86_spec_ctrl_mask &= ~SPEC_CTRL_RDS;
			x86_spec_ctrl_set(SPEC_CTRL_RDS);
			break;
		case X86_VENDOR_AMD:
			x86_amd_rds_enable();
			break;
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt

static int ssb_prctl_set(unsigned long ctrl)
{
	bool rds = !!test_tsk_thread_flag(current, TIF_RDS);

	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
		return -ENXIO;

	if (ctrl == PR_SPEC_ENABLE)
		clear_tsk_thread_flag(current, TIF_RDS);
	else
		set_tsk_thread_flag(current, TIF_RDS);

	if (rds != !!test_tsk_thread_flag(current, TIF_RDS))
		speculative_store_bypass_update();

	return 0;
}

static int ssb_prctl_get(void)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_PRCTL:
		if (test_tsk_thread_flag(current, TIF_RDS))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
{
	if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
		return -ERANGE;

	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(ctrl);
	default:
		return -ENODEV;
	}
}

int arch_prctl_spec_ctrl_get(unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get();
	default:
		return -ENODEV;
	}
}
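
/*
 * Illustrative userspace usage of the interface above (a sketch,
 * assuming ssb_mode == SPEC_STORE_BYPASS_PRCTL):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *	prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *
 * The first call sets TIF_RDS for the calling task via ssb_prctl_set();
 * the second then returns PR_SPEC_PRCTL | PR_SPEC_DISABLE.
 */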

void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_IBRS))
		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_rds_enable();
}

#ifdef CONFIG_SYSFS

ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sprintf(buf, "Mitigation: PTI\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			       spectre_v2_module_string());

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
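
/*
 * These cpu_show_*() handlers back the sysfs files under
 * /sys/devices/system/cpu/vulnerabilities/, for example:
 *
 *	$ cat /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
 *	Mitigation: Speculative Store Bypass disabled via prctl
 */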
#endif