/* MN10300  Process handling code
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/fpu.h>
#include <asm/reset-regs.h>
#include <asm/gdb-stub.h>
#include "internal.h"

/*
 * power management idle function, if any..
 * - platform code may install a hook here to override the default idle
 *   routine; cpu_idle() below reads it each time around the idle loop
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

/*
 * return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	unsigned long *ksp = (unsigned long *) tsk->thread.sp;

	/* the saved PC is the fourth word at the saved kernel SP */
	return ksp[3];
}

/*
 * power off function, if any
 * - exported hook for platform code; nothing in this file calls it
 *   (presumably invoked from the generic power-off path — verify)
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/*
 * we use this if we don't have any better idle routine
 * - halt with interrupts disabled unless a reschedule is already pending,
 *   in which case just re-enable interrupts and return to the idle loop
 */
static void default_idle(void)
{
	local_irq_disable();
	if (need_resched())
		local_irq_enable();
	else
		safe_halt();
}

#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU  */
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static inline void poll_idle(void)
{
	int oldval;

	local_irq_enable();

	/*
	 * Deal with another CPU just having chosen a thread to
	 * run here:
	 */
	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

	if (!oldval) {
		/* no reschedule pending yet: advertise that we poll the
		 * flag (so remote CPUs can skip the cross-CPU IPI, per the
		 * comment above) and spin until work arrives */
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())
			cpu_relax();
		clear_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		/* we just cleared a pending resched request; reassert it
		 * so the caller's need_resched() test sees it */
		set_need_resched();
	}
}
#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */

/*
 * the idle thread
 * - there's no useful work to be done, so just try to conserve power and have
 *   a low exit latency (ie sit in a loop waiting for somebody to say that
 *   they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	for (;;) {
111
		rcu_idle_enter();
112 113 114 115 116
		while (!need_resched()) {
			void (*idle)(void);

			smp_rmb();
			idle = pm_idle;
117 118 119 120
			if (!idle) {
#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
				idle = poll_idle;
#else  /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
121
				idle = default_idle;
122 123
#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
			}
124 125
			idle();
		}
126
		rcu_idle_exit();
127

128
		schedule_preempt_disabled();
129 130 131 132 133 134 135 136 137
	}
}

/* nothing arch-specific to tear down for an mm on this architecture */
void release_segments(struct mm_struct *mm)
{
}

void machine_restart(char *cmd)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	/* detach the GDB stub cleanly before the reset */
	gdbstub_exit(0);
#endif

	/* prefer the unit-specific hard reset when the board defines one,
	 * otherwise fall back to the processor-level reset */
#ifdef mn10300_unit_hard_reset
	mn10300_unit_hard_reset();
#else
	mn10300_proc_hard_reset();
#endif
}

void machine_halt(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	/* detach the GDB stub; there is no actual halt action here */
	gdbstub_exit(0);
#endif
}

void machine_power_off(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	/* detach the GDB stub; no hardware power-off is performed here */
	gdbstub_exit(0);
#endif
}

/* register dump for oopses etc. — not implemented on this architecture */
void show_regs(struct pt_regs *regs)
{
}

/*
 * free current thread data structures etc..
 */
void exit_thread(void)
{
	exit_fpu();	/* drop the exiting task's FPU state (asm/fpu.h) */
}

/* reset arch thread state on exec */
void flush_thread(void)
{
	flush_fpu();	/* discard any FPU state from the old image */
}

/* no per-thread arch resources to free when a dead task is reaped */
void release_thread(struct task_struct *dead_task)
{
}

/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
{
}

/*
193 194
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
195
 */
196
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
197
{
198 199 200
	unlazy_fpu(src);
	*dst = *src;
	return 0;
201 202 203 204 205 206
}

/*
 * set up the kernel stack for a new thread and copy arch-specific thread
 * control information
 */
int copy_thread(unsigned long clone_flags,
208
		unsigned long c_usp, unsigned long ustk_size,
209
		struct task_struct *p, struct pt_regs *unused)
210
{
211
	struct thread_info *ti = task_thread_info(p);
212
	struct pt_regs *c_regs;
213 214 215 216 217 218
	unsigned long c_ksp;

	c_ksp = (unsigned long) task_stack_page(p) + THREAD_SIZE;

	/* allocate the userspace exception frame and set it up */
	c_ksp -= sizeof(struct pt_regs);
219
	c_regs = (struct pt_regs *) c_ksp;
220
	c_ksp -= 12; /* allocate function call ABI slack */
221

222
	/* set up things up so the scheduler can start the new task */
223
	p->thread.uregs = c_regs;
224 225 226 227 228
	ti->frame	= c_regs;
	p->thread.a3	= (unsigned long) c_regs;
	p->thread.sp	= c_ksp;
	p->thread.wchan	= p->thread.pc;
	p->thread.usp	= c_usp;
229

230
	if (unlikely(p->flags & PF_KTHREAD)) {
231 232 233 234 235 236 237 238
		memset(c_regs, 0, sizeof(struct pt_regs));
		c_regs->a0 = c_usp; /* function */
		c_regs->d0 = ustk_size; /* argument */
		local_save_flags(c_regs->epsw);
		c_regs->epsw |= EPSW_IE | EPSW_IM_7;
		p->thread.pc	= (unsigned long) ret_from_kernel_thread;
		return 0;
	}
239 240 241
	*c_regs = *current_pt_regs();
	if (c_usp)
		c_regs->sp = c_usp;
242
	c_regs->epsw &= ~EPSW_FE; /* my FPU */
243 244 245

	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
	if (clone_flags & CLONE_SETTLS)
246
		c_regs->e2 = current_frame()->d3;
247 248 249 250 251 252 253 254 255 256

	p->thread.pc	= (unsigned long) ret_from_fork;

	return 0;
}

/* report a sleeping task's wait channel; it is kept in thread.wchan
 * (set in copy_thread), so no stack unwinding is needed */
unsigned long get_wchan(struct task_struct *p)
{
	return p->thread.wchan;
}