// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level.  Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 * Poison it.
		 */
		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,

#ifdef CONFIG_X86_64
		/*
		 * .sp1 is cpu_current_top_of_stack.  The init task never
		 * runs user code, but cpu_current_top_of_stack should still
		 * be well defined before the first context switch.
		 */
		.sp1 = TOP_OF_INIT_STACK,
#endif

#ifdef CONFIG_X86_32
		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
#endif
	 },
#ifdef CONFIG_X86_32
	 /*
	  * Note that the .io_bitmap member must be extra-big. This is because
	  * the CPU will access an additional byte beyond the end of the IO
	  * permission bitmap. The extra byte must be all 1 bits, and must
	  * be within the limit.
	  */
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);

DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif

	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	free_vm86(t);

	fpu__drop(fpu);
}

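/*
 * Reset the per-thread state of current: clear any installed hardware
 * breakpoints, wipe the TLS descriptor slots and clear the FPU state.
 */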
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear(&tsk->thread.fpu);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}

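/*
 * Back end of the PR_GET_TSC/PR_SET_TSC prctl() interface.  Illustrative
 * userspace usage (a sketch, not part of this file):
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);	// RDTSC now raises SIGSEGV
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE, 0, 0, 0);	// RDTSC allowed again
 */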
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

DEFINE_PER_CPU(u64, msr_misc_features_shadow);

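/*
 * CPUID faulting: on CPUs with X86_FEATURE_CPUID_FAULT, setting the
 * CPUID-fault bit in MSR_MISC_FEATURES_ENABLES makes the CPUID instruction
 * trap when executed at CPL > 0.  A per-CPU shadow of the MSR is kept so
 * the bit can be flipped cheaply on context switch.
 */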
static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}

static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}

static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}

static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
	if (!static_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}

/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();
}

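/*
 * Copy the next task's I/O permission bitmap into this CPU's TSS, or clear
 * the previous task's leftover bits when the next task does not use one.
 */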
static inline void switch_to_bitmap(struct tss_struct *tss,
				    struct thread_struct *prev,
				    struct thread_struct *next,
				    unsigned long tifp, unsigned long tifn)
{
	if (tifn & _TIF_IO_BITMAP) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
		/*
		 * Make sure that the TSS limit is correct for the CPU
		 * to notice the IO bitmap.
		 */
		refresh_tss_limit();
	} else if (tifp & _TIF_IO_BITMAP) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
}

#ifdef CONFIG_SMP

struct ssb_state {
	struct ssb_state	*shared_state;
	raw_spinlock_t		lock;
	unsigned int		disable_state;
	unsigned long		local_state;
};

#define LSTATE_SSB	0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);

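/*
 * Called on the first bringup of a CPU to link its ssb_state to the shared
 * state of its HT siblings; the first sibling to come up provides the
 * shared copy.
 */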
void speculative_store_bypass_ht_init(void)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	st->local_state = 0;

	/*
	 * Shared state setup happens once on the first bringup
	 * of the CPU. It's not destroyed on CPU hotunplug.
	 */
	if (st->shared_state)
		return;

	raw_spin_lock_init(&st->lock);

	/*
	 * Go over HT siblings and check whether one of them has set up the
	 * shared state pointer already.
	 */
	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu == this_cpu)
			continue;

		if (!per_cpu(ssb_state, cpu).shared_state)
			continue;

		/* Link it to the state of the sibling: */
		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
		return;
	}

	/*
	 * First HT sibling to come up on the core.  Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */
	st->shared_state = st;
}

/*
 * Logic: the first HT sibling to enable SSBD enables it for both siblings in
 * the core, and the last sibling to disable it disables it for the whole
 * core. This is how MSR_SPEC_CTRL works in "hardware":
 *
 *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	u64 msr = x86_amd_ls_cfg_base;

	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrl(MSR_AMD64_LS_CFG, msr);
		return;
	}

	if (tifn & _TIF_SSBD) {
		/*
		 * Since this can race with prctl(), block reentry on the
		 * same CPU.
		 */
		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
			return;

		msr |= x86_amd_ls_cfg_ssbd_mask;

		raw_spin_lock(&st->shared_state->lock);
		/* First sibling enables SSBD: */
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		st->shared_state->disable_state++;
		raw_spin_unlock(&st->shared_state->lock);
	} else {
		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
			return;

		raw_spin_lock(&st->shared_state->lock);
		st->shared_state->disable_state--;
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		raw_spin_unlock(&st->shared_state->lock);
	}
}
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
	/*
	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
	 * so ssbd_tif_to_spec_ctrl() just works.
	 */
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}

static __always_inline void intel_set_ssb_state(unsigned long tifn)
{
	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);

	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}

static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
{
	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
		amd_set_ssb_virt_state(tifn);
	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		amd_set_core_ssb_state(tifn);
	else
		intel_set_ssb_state(tifn);
}

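/*
 * Bring the SSBD MSR state of this CPU back in sync with the current task's
 * thread flags; preemption is disabled so the flags and the MSR cannot get
 * out of step.
 */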
void speculative_store_bypass_update(void)
{
	preempt_disable();
	__speculative_store_bypass_update(current_thread_info()->flags);
	preempt_enable();
}

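/*
 * Slow path of the context switch: handle the TIF bits that differ between
 * the previous and the next task (I/O bitmap, BTF block stepping, TIF_NOTSC,
 * CPUID faulting and SSBD).
 */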
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long tifp, tifn;

	prev = &prev_p->thread;
	next = &next_p->thread;

	tifn = READ_ONCE(task_thread_info(next_p)->flags);
	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
	switch_to_bitmap(tss, prev, next, tifp, tifn);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if ((tifp ^ tifn) & _TIF_SSBD)
		__speculative_store_bypass_update(tifn);
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine.
 */
void __cpuidle default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	/*
	 * Use wbinvd on processors that support SME. This provides support
	 * for performing a successful kexec when going from SME inactive
	 * to SME active (or vice-versa). The cache must be cleared so that
	 * if there are entries with the same physical address, both with and
	 * without the encryption bit, they don't race each other when flushed
	 * and potentially end up with the wrong entry being committed to
	 * memory.
	 */
	if (boot_cpu_has(X86_FEATURE_SME))
		native_wbinvd();
	for (;;) {
		/*
		 * Use native_halt() so that memory contents don't change
		 * (stack usage and variables) after possibly issuing the
		 * native_wbinvd() above.
		 */
		native_halt();
	}
}

/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */
	local_irq_disable();
	tick_broadcast_exit();
	local_irq_enable();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}

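/*
 * Pick the idle routine for this machine unless one has already been
 * selected, e.g. by Xen or the "idle=" boot parameter.
 */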
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}

void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}

void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}

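/* Handle the "idle=poll", "idle=halt" and "idle=nomwait" boot parameters. */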
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);

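/*
 * Randomize the top of the stack by up to 8 kB (unless the task opted out of
 * address space randomization) and align it down to 16 bytes.
 */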
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	if (!try_get_task_stack(p))
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		goto out;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start
	 *
	 * The task's stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start;

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		goto out;

	fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
	do {
		if (fp < bottom || fp > top)
			goto out;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip)) {
			ret = ip;
			goto out;
		}
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);

out:
	put_task_stack(p);
	return ret;
}

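/*
 * arch_prctl() options shared by the 32-bit and 64-bit paths; only the CPUID
 * faulting controls are handled here.  Illustrative userspace usage (a
 * sketch, not part of this file):
 *
 *	arch_prctl(ARCH_SET_CPUID, 0);	// CPUID now raises SIGSEGV in this task
 *	arch_prctl(ARCH_SET_CPUID, 1);	// CPUID works again
 */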
long do_arch_prctl_common(struct task_struct *task, int option,
			  unsigned long cpuid_enabled)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(task, cpuid_enabled);
	}

	return -EINVAL;
}