/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/vm86.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.esp)[3];
}

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

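/*
 * A non-zero hlt_counter keeps default_idle() from executing HLT;
 * callers use disable_hlt()/enable_hlt() around code that cannot
 * tolerate the CPU being halted.
 */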
void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		local_irq_disable();
		if (!need_resched()) {
			ktime_t t0, t1;
			u64 t0n, t1n;

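			/*
			 * Timestamp the halt so the idle period can be
			 * reported to the scheduler clock on wakeup.
			 */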
			t0 = ktime_get();
			t0n = ktime_to_ns(t0);
			safe_halt();	/* enables interrupts racelessly */
			local_irq_disable();
			t1 = ktime_get();
			t1n = ktime_to_ns(t1);
			sched_clock_idle_wakeup_event(t1n - t0n);
		}
		local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
	} else {
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	cpu_relax();
}

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take the CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* This must be done before dead CPU ack */
	cpu_exit_clear();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			check_pgt_cache();
			rmb();
			idle = pm_idle;

			if (!idle)
				idle = default_idle;

			if (cpu_is_offline(cpu))
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			idle();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

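/*
 * Dummy IPI target used by cpu_idle_wait(): its only purpose is to
 * wake a CPU out of an idle state that only an interrupt can leave.
 */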
static void do_nothing(void *unused)
{
}

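/*
 * cpu_idle_wait() returns only after every online CPU has passed
 * through the top of its idle loop at least once.  A caller that
 * changes pm_idle can use it to make sure no CPU is still inside the
 * old idle handler, e.g. (sketch; my_new_idle is hypothetical):
 *
 *	pm_idle = my_new_idle;
 *	cpu_idle_wait();
 */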
void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map, tmp = current->cpus_allowed;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
		/*
		 * We waited 1 sec; if a CPU still did not enter idle,
		 * it may be because it is already idle and not waking
		 * up because it has nothing to do.
		 * Give all the remaining CPUs a kick.
		 */
		smp_call_function_mask(map, do_nothing, 0, 0);
	} while (!cpus_empty(map));

	set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate the IPI otherwise needed to trigger a need_resched check.
 * We execute MONITOR against need_resched and enter an optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we are woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
	if (!need_resched()) {
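		/*
		 * MONITOR arms a wakeup on writes to the thread flags;
		 * MWAIT then idles until such a write (e.g. a remote CPU
		 * setting TIF_NEED_RESCHED) or an interrupt arrives.
		 */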
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(eax, ecx);
	}
}

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	local_irq_enable();
	mwait_idle_with_hints(0, 0);
}

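/*
 * Called while bringing up each CPU: switch the default idle routine
 * to MWAIT-based idle if the CPU supports it and no idle routine has
 * been chosen yet.
 */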
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		printk("monitor/mwait feature present.\n");
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => all CPUs support mwait
		 */
		if (!pm_idle) {
			printk("using mwait in idle threads.\n");
			pm_idle = mwait_idle;
		}
	}
}

static int __init idle_setup(char *str)
{
	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
#ifdef CONFIG_X86_SMP
		if (smp_num_siblings > 1)
			printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
#endif
	} else if (!strcmp(str, "mwait"))
		force_mwait = 1;
	else
		return -1;

	boot_option_idle_override = 1;
	return 0;
}
early_param("idle", idle_setup);

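/*
 * Dump the register state in 'regs'; if 'all' is set, also print the
 * control and debug registers.
 */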
void __show_registers(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long esp;
	unsigned short ss, gs;

	if (user_mode_vm(regs)) {
		esp = regs->esp;
		ss = regs->xss & 0xffff;
		savesegment(gs, gs);
	} else {
		esp = (unsigned long) (&regs->esp);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk("\n");
	printk("Pid: %d, comm: %s %s (%s %.*s)\n",
			task_pid_nr(current), current->comm,
			print_tainted(), init_utsname()->release,
			(int)strcspn(init_utsname()->version, " "),
			init_utsname()->version);

	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
			0xffff & regs->xcs, regs->eip, regs->eflags,
			smp_processor_id());
	print_symbol("EIP is at %s\n", regs->eip);

	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->esi, regs->edi, regs->ebp, esp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
	       regs->xds & 0xffff, regs->xes & 0xffff,
	       regs->xfs & 0xffff, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR6: %08lx DR7: %08lx\n",
			d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	__show_registers(regs, 1);
	show_trace(NULL, regs, &regs->esp);
}

/*
 * This gets run with %ebx containing the
 * function to call, and %edx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.ebx = (unsigned long) fn;
	regs.edx = (unsigned long) arg;

	regs.xds = __USER_DS;
	regs.xes = __USER_DS;
	regs.xfs = __KERNEL_PERCPU;
	regs.orig_eax = -1;
	regs.eip = (unsigned long) kernel_thread_helper;
	regs.xcs = __KERNEL_CS | get_kernel_rpl();
	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));	
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

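/*
 * Set up the kernel stack and thread state of a newly forked child:
 * it gets a copy of the parent's user registers with eax forced to 0
 * (so fork returns 0 in the child) and starts life in ret_from_fork.
 */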
int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
	unsigned long unused,
	struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->eax = 0;
	childregs->esp = esp;

	p->thread.esp = (unsigned long) childregs;
	p->thread.esp0 = (unsigned long) (childregs+1);

	p->thread.eip = (unsigned long) ret_from_fork;

	savesegment(gs, p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
		struct desc_struct *desc;
		struct user_desc info;
		int idx;

		err = -EFAULT;
		if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
			goto out;
		err = -EINVAL;
		if (LDT_empty(&info))
			goto out;

		idx = info.entry_number;
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			goto out;

		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}

	err = 0;
 out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	int i;

/* changed the size calculations - should hopefully work better. lbt */
	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	for (i = 0; i < 8; i++)
		dump->u_debugreg[i] = current->thread.debugreg[i];  

	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

	dump->regs.ebx = regs->ebx;
	dump->regs.ecx = regs->ecx;
	dump->regs.edx = regs->edx;
	dump->regs.esi = regs->esi;
	dump->regs.edi = regs->edi;
	dump->regs.ebp = regs->ebp;
	dump->regs.eax = regs->eax;
	dump->regs.ds = regs->xds;
	dump->regs.es = regs->xes;
	dump->regs.fs = regs->xfs;
	savesegment(gs, dump->regs.gs);
	dump->regs.orig_eax = regs->orig_eax;
	dump->regs.eip = regs->eip;
	dump->regs.cs = regs->xcs;
	dump->regs.eflags = regs->eflags;
	dump->regs.esp = regs->esp;
	dump->regs.ss = regs->xss;

	dump->u_fpvalid = dump_fpu(regs, &dump->i387);
}
EXPORT_SYMBOL(dump_thread);

/* 
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs ptregs = *task_pt_regs(tsk);
	ptregs.xcs &= 0xffff;
	ptregs.xds &= 0xffff;
	ptregs.xes &= 0xffff;
	ptregs.xss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}

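/*
 * Setting CR4.TSD makes RDTSC fault at user privilege level; seccomp
 * uses this to withhold fine-grained timing from sandboxed tasks.
 */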
#ifdef CONFIG_SECCOMP
void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}
void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}
void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}
#endif /* CONFIG_SECCOMP */

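/*
 * Slow-path context-switch work: reload the debug registers, flip
 * CR4.TSD if TIF_NOTSC differs between the tasks, and update the TSS
 * I/O bitmap state.  Only called when prev or next has one of the
 * _TIF_WORK_CTXSW flags set.
 */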
static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		 struct tss_struct *tss)
{
	struct thread_struct *next;

	next = &next_p->thread;

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		set_debugreg(next->debugreg[0], 0);
		set_debugreg(next->debugreg[1], 1);
		set_debugreg(next->debugreg[2], 2);
		set_debugreg(next->debugreg[3], 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg[6], 6);
		set_debugreg(next->debugreg[7], 7);
	}

#ifdef CONFIG_SECCOMP
	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}
#endif

	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Disable the bitmap via an invalid offset. We still cache
		 * the previous bitmap owner and the IO bitmap contents:
		 */
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		return;
	}

	if (likely(next == tss->io_bitmap_owner)) {
		/*
		 * Previous owner of the bitmap (hence the bitmap content)
		 * matches the next task, we don't have to do anything but
		 * set a valid offset in the TSS:
		 */
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		return;
	}
	/*
	 * Lazy TSS's I/O bitmap copy. We set an invalid offset here
	 * and let the task get a GPF in case an I/O instruction is
	 * performed.  The GPF handler verifies that the faulting task
	 * has a valid I/O bitmap and, if true, does the real copy and
	 * restarts the instruction.  This saves us
	 * redundant copies when the currently switched task does not
	 * perform any I/O during its timeslice.
	 */
	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %eax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);


	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(&next->i387.fxsave);

	/*
	 * Reload esp0.
	 */
	load_esp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry.  No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel.  Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs.  This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	savesegment(gs, prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed.  In normal use, the flags restore
	 * in the switch assembly will handle this.  But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/* If the task has used the FPU in the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);

	x86_write_percpu(current_task, next_p);

	return prev_p;
}

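/*
 * The fork family takes the full pt_regs frame by value: the caller's
 * registers carry the arguments (for clone: flags in ebx, the new
 * stack in ecx, the tid pointers in edx and edi), and regs.esp is
 * used as the child's stack when no explicit one is given.
 */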
asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.ebx;
	newsp = regs.ecx;
	parent_tidptr = (int __user *)regs.edx;
	child_tidptr = (int __user *)regs.edi;
	if (!newsp)
		newsp = regs.esp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char * filename;

	filename = getname((char __user *) regs.ebx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.ecx,
			(char __user * __user *) regs.edx,
			&regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}

#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))

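/*
 * get_wchan: walk the frame-pointer chain of a sleeping task and
 * return the first return address outside the scheduler, i.e. the
 * place where the task blocked.
 */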
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ebp, esp, eip;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	esp = p->thread.esp;
	if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes ebp last. */
	ebp = *(unsigned long *) esp;
	do {
		if (ebp < stack_page || ebp > top_ebp+stack_page)
			return 0;
		eip = *(unsigned long *) (ebp+4);
		if (!in_sched_functions(eip))
			return eip;
		ebp = *(unsigned long *) ebp;
	} while (count++ < 16);
	return 0;
}

/*
 * sys_alloc_thread_area: get a yet unused TLS descriptor index.
 */
static int get_free_idx(void)
{
	struct thread_struct *t = &current->thread;
	int idx;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (desc_empty(t->tls_array + idx))
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

/*
 * Set a given TLS descriptor:
 */
asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
{
	struct thread_struct *t = &current->thread;
	struct user_desc info;
	struct desc_struct *desc;
	int cpu, idx;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;
	idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;

	/*
	 * We must not get preempted while modifying the TLS.
	 */
	cpu = get_cpu();

	if (LDT_empty(&info)) {
		desc->a = 0;
		desc->b = 0;
	} else {
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}
	load_TLS(t, cpu);

	put_cpu();

	return 0;
}

/*
 * Get the current Thread-Local Storage area:
 */

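/* Field extractors for the packed i386 segment-descriptor layout. */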
#define GET_BASE(desc) ( \
	(((desc)->a >> 16) & 0x0000ffff) | \
	(((desc)->b << 16) & 0x00ff0000) | \
	( (desc)->b        & 0xff000000)   )

#define GET_LIMIT(desc) ( \
	((desc)->a & 0x0ffff) | \
	 ((desc)->b & 0xf0000) )
	
#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)	(((desc)->b >>  9) & 1)
#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)

asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
{
	struct user_desc info;
	struct desc_struct *desc;
	int idx;

	if (get_user(idx, &u_info->entry_number))
		return -EFAULT;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	memset(&info, 0, sizeof(info));

	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

	info.entry_number = idx;
	info.base_addr = GET_BASE(desc);
	info.limit = GET_LIMIT(desc);
	info.seg_32bit = GET_32BIT(desc);
	info.contents = GET_CONTENTS(desc);
	info.read_exec_only = !GET_WRITABLE(desc);
	info.limit_in_pages = GET_LIMIT_PAGES(desc);
	info.seg_not_present = !GET_PRESENT(desc);
	info.useable = GET_USEABLE(desc);

	if (copy_to_user(u_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

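/*
 * Randomize the initial user stack pointer by up to 8KB while keeping
 * 16-byte alignment, unless address-space randomization is disabled.
 */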
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}