#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
	.x86_tss = {
		.sp0 = TOP_OF_INIT_STACK,
#ifdef CONFIG_X86_32
		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
#endif
	 },
#ifdef CONFIG_X86_32
	 /*
	  * Note that the .io_bitmap member must be extra-big. This is because
	  * the CPU will access an additional byte beyond the end of the IO
	  * permission bitmap. The extra byte must be all 1 bits, and must
	  * be within the limit.
	  */
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);

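/*
 * On x86-64, other code can register idle notifiers to be told when a
 * CPU enters (IDLE_START) or leaves (IDLE_END) the idle loop.
 */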
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;

	return fpu__copy(dst, src);
}

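/* Release the task's dynamically allocated FPU state area. */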
void arch_release_task_struct(struct task_struct *tsk)
{
	fpstate_free(&tsk->thread.fpu);
}

void arch_task_cache_init(void)
{
	fpstate_cache_init();
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	drop_fpu(me);
}

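/*
 * Reset the per-thread state of the current task: clear hardware
 * breakpoints, the TLS array and the FPU state.
 */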
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__flush_thread(tsk);
}

static void hard_disable_TSC(void)
{
	cr4_set_bits(X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	cr4_clear_bits(X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

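/*
 * Handle the less common parts of a context switch: the BTF (block step)
 * debugctl bit, the TIF_NOTSC/CR4.TSD setting, the TSS I/O bitmap and
 * the user-return notifiers.
 */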
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
	this_cpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#endif

void arch_cpu_idle_enter(void)
{
	local_touch_nmi();
	enter_idle();
}

void arch_cpu_idle_exit(void)
{
	__exit_idle();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine..
 */
void default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
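/*
 * Xen forces default_idle as the idle routine; the return value tells
 * the caller whether an idle routine had already been installed.
 */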
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif
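
/*
 * Park the calling CPU: mark it offline, disable its local APIC and
 * halt forever with interrupts off.
 */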
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;)
		halt();
}

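/* State used by the AMD Erratum 400 (C1E) aware idle routine below. */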
bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/* Force broadcast so ACPI can not interfere. */
			tick_broadcast_force();
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		tick_broadcast_enter();

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		tick_broadcast_exit();
		local_irq_enable();
	} else
		default_idle();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT))
		return 0;

	return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for the default C1 state.
 * This invokes MWAIT with interrupts enabled and no flags,
 * which is backwards compatible with the original MWAIT implementation.
 */

static void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			smp_mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			smp_mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}

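/*
 * Pick the boot-time idle routine for this CPU: keep any existing
 * override, use the E400 aware routine on affected AMD parts, prefer
 * MWAIT-C1 where available, otherwise fall back to default_idle (HLT).
 */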
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (x86_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

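/* Parse the "idle=" boot parameter: poll, halt or nomwait. */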
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);

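/*
 * Stack and brk randomization helpers: nudge the initial stack pointer
 * by up to 8k (16-byte aligned) and randomize the brk start, honouring
 * ADDR_NO_RANDOMIZE and randomize_va_space.
 */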
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}