/*
 * File:         arch/blackfin/kernel/process.c
 * Based on:
 * Author:
 *
 * Created:
 * Description:  Blackfin architecture-dependent process handling.
 *
 * Modified:
 *               Copyright 2004-2006 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/fs.h>
#include <linux/err.h>

#include <asm/blackfin.h>
#include <asm/fixed_code.h>
#include <asm/mem_map.h>

/* Assembly entry point (see entry.S); copy_thread() below points a new
 * thread's saved PC here so it resumes through the fork return path. */
asmlinkage void ret_from_fork(void);

/* Points to the SDRAM backup memory for the stack that is currently in
 * L1 scratchpad memory.
 */
void *current_l1_stack_save;

/* The number of tasks currently using a L1 stack area.  The SRAM is
 * allocated/deallocated whenever this changes from/to zero.
 */
int nr_l1stack_tasks;

/* Start and length of the area in L1 scratchpad memory which we've allocated
 * for process stacks.
 */
void *l1_stack_base;
unsigned long l1_stack_len;

/*
 * Powermanagement idle function, if any..
 * When NULL, cpu_idle() falls back to default_idle().
 */
void (*pm_idle)(void) = NULL;
EXPORT_SYMBOL(pm_idle);

/* Board/platform code may install a power-off hook here. */
void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

/*
 * The idle loop on BFIN
 */
#ifdef CONFIG_IDLE_L1
/* Place the idle path in the L1 text section (per the l1_text attribute)
 * when CONFIG_IDLE_L1 is enabled. */
static void default_idle(void)__attribute__((l1_text));
void cpu_idle(void)__attribute__((l1_text));
#endif

/*
 * This is our default idle handler.  We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
static void default_idle(void)
{
#ifdef CONFIG_IPIPE
	ipipe_suspend_domain();
#endif
	local_irq_disable_hw();
	/* Re-check need_resched() with interrupts off so a wakeup cannot
	 * slip in between the test and the idle instruction. */
	if (!need_resched())
		idle_with_irq_disabled();

	local_irq_enable_hw();
}

/*
96 97 98
 * The idle thread.  We try to conserve power, while trying to keep
 * overall latency low.  The architecture specific idle is passed
 * a value to indicate the level of "idleness" of the system.
B
Bryan Wu 已提交
99 100 101 102 103
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
104 105 106 107 108 109 110 111
		void (*idle)(void) = pm_idle;

#ifdef CONFIG_HOTPLUG_CPU
		if (cpu_is_offline(smp_processor_id()))
			cpu_die();
#endif
		if (!idle)
			idle = default_idle;
112
		tick_nohz_stop_sched_tick(1);
113 114 115
		while (!need_resched())
			idle();
		tick_nohz_restart_sched_tick();
B
Bryan Wu 已提交
116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/* Fill in the fpu structure for a core dump.  */

/* Returns 1 (success) without writing *fpregs; this port records no
 * FPU register state for core dumps. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpregs)
{
	return 1;
}

/*
 * This gets run with P1 containing the
 * function to call, and R1 containing
 * the "args".  Note P0 is clobbered on the way here.
 */
void kernel_thread_helper(void);
__asm__(".section .text\n"
	".align 4\n"
	"_kernel_thread_helper:\n\t"
	"\tsp += -12;\n\t"
	"\tr0 = r1;\n\t" "\tcall (p1);\n\t" "\tcall _do_exit;\n" ".previous");

/*
 * Create a kernel thread.
 * Builds a fake register frame that makes the child start in
 * kernel_thread_helper with P1 = fn and R1 = arg, then hands it
 * to do_fork().  Returns the new thread's pid or a negative errno.
 */
pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.r1 = (unsigned long)arg;
	regs.p1 = (unsigned long)fn;
	regs.pc = (unsigned long)kernel_thread_helper;
	regs.orig_p0 = -1;
	/* Set bit 2 to tell ret_from_fork we should be returning to kernel
	   mode.  */
	regs.ipend = 0x8002;
	__asm__ __volatile__("%0 = syscfg;":"=da"(regs.syscfg):);
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
		       NULL);
}
EXPORT_SYMBOL(kernel_thread);

/* No architecture-specific per-thread state needs resetting here. */
void flush_thread(void)
{
}

/* vfork: share the VM with the parent and block it until the child
 * execs or exits; the child runs on the parent's current user stack. */
asmlinkage int bfin_vfork(struct pt_regs *regs)
{
	unsigned long usp = rdusp();

	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, usp, regs, 0, NULL,
		       NULL);
}

/*
 * clone syscall entry.  Reads clone_flags and the new user stack
 * pointer out of the register frame and forwards to do_fork().
 */
asmlinkage int bfin_clone(struct pt_regs *regs)
{
	unsigned long clone_flags;
	unsigned long newsp;

#ifdef __ARCH_SYNC_CORE_DCACHE
	/* Pin the task to the current CPU if it is still allowed on all
	 * CPUs, to avoid cross-core dcache coherency issues. */
	if (current->rt.nr_cpus_allowed == num_possible_cpus()) {
		current->cpus_allowed = cpumask_of_cpu(smp_processor_id());
		current->rt.nr_cpus_allowed = 1;
	}
#endif

	/* syscall2 puts clone_flags in r0 and usp in r1 */
	clone_flags = regs->r0;
	newsp = regs->r1;
	if (!newsp)
		newsp = rdusp();
	else
		newsp -= 12;	/* leave 12 bytes of slack on the new stack */
	return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
}

int
A
Alexey Dobriyan 已提交
196
copy_thread(unsigned long clone_flags,
B
Bryan Wu 已提交
197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216
	    unsigned long usp, unsigned long topstk,
	    struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;

	childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
	*childregs = *regs;
	childregs->r0 = 0;

	p->thread.usp = usp;
	p->thread.ksp = (unsigned long)childregs;
	p->thread.pc = (unsigned long)ret_from_fork;

	return 0;
}

/*
 * sys_execve() executes a new program.
 */

217
asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
B
Bryan Wu 已提交
218 219 220 221 222 223 224 225 226 227 228 229
{
	int error;
	char *filename;
	struct pt_regs *regs = (struct pt_regs *)((&name) + 6);

	lock_kernel();
	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
230
 out:
B
Bryan Wu 已提交
231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257
	unlock_kernel();
	return error;
}

/*
 * Walk a sleeping task's saved frame pointers and return the first PC
 * that is outside the scheduler (used for /proc/<pid>/wchan).
 * Returns 0 if the task is running, is the caller, or no PC is found.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, pc;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)p;
	fp = p->thread.usp;
	do {
		/* Sanity-check fp stays within the task's stack area.
		 * NOTE(review): 8184 is presumably THREAD_SIZE - 8 (8192-8)
		 * hard-coded — confirm against thread_info.h. */
		if (fp < stack_page + sizeof(struct thread_info) ||
		    fp >= 8184 + stack_page)
			return 0;
		/* Saved return address sits one word above the saved fp. */
		pc = ((unsigned long *)fp)[1];
		if (!in_sched_functions(pc))
			return pc;
		/* Follow the frame-pointer chain to the caller's frame. */
		fp = *(unsigned long *)fp;
	}
	while (count++ < 16);	/* bound the walk in case the chain is corrupt */
	return 0;
}

void finish_atomic_sections (struct pt_regs *regs)
{
260
	int __user *up0 = (int __user *)regs->p0;
261

262 263 264 265 266
	if (regs->pc < ATOMIC_SEQS_START || regs->pc >= ATOMIC_SEQS_END)
		return;

	switch (regs->pc) {
	case ATOMIC_XCHG32 + 2:
267
		put_user(regs->r1, up0);
268 269 270 271 272 273
		regs->pc += 2;
		break;

	case ATOMIC_CAS32 + 2:
	case ATOMIC_CAS32 + 4:
		if (regs->r0 == regs->r1)
274
			put_user(regs->r2, up0);
275 276 277
		regs->pc = ATOMIC_CAS32 + 8;
		break;
	case ATOMIC_CAS32 + 6:
278
		put_user(regs->r2, up0);
279 280 281 282 283 284 285
		regs->pc += 2;
		break;

	case ATOMIC_ADD32 + 2:
		regs->r0 = regs->r1 + regs->r0;
		/* fall through */
	case ATOMIC_ADD32 + 4:
286
		put_user(regs->r0, up0);
287 288 289 290 291 292 293
		regs->pc = ATOMIC_ADD32 + 6;
		break;

	case ATOMIC_SUB32 + 2:
		regs->r0 = regs->r1 - regs->r0;
		/* fall through */
	case ATOMIC_SUB32 + 4:
294
		put_user(regs->r0, up0);
295 296 297 298 299 300 301
		regs->pc = ATOMIC_SUB32 + 6;
		break;

	case ATOMIC_IOR32 + 2:
		regs->r0 = regs->r1 | regs->r0;
		/* fall through */
	case ATOMIC_IOR32 + 4:
302
		put_user(regs->r0, up0);
303 304 305 306 307 308 309
		regs->pc = ATOMIC_IOR32 + 6;
		break;

	case ATOMIC_AND32 + 2:
		regs->r0 = regs->r1 & regs->r0;
		/* fall through */
	case ATOMIC_AND32 + 4:
310
		put_user(regs->r0, up0);
311 312 313 314 315 316 317
		regs->pc = ATOMIC_AND32 + 6;
		break;

	case ATOMIC_XOR32 + 2:
		regs->r0 = regs->r1 ^ regs->r0;
		/* fall through */
	case ATOMIC_XOR32 + 4:
318
		put_user(regs->r0, up0);
319 320 321 322 323
		regs->pc = ATOMIC_XOR32 + 6;
		break;
	}
}

#if defined(CONFIG_ACCESS_CHECK)
#ifdef CONFIG_ACCESS_OK_L1
__attribute__((l1_text))
#endif
/* Return 1 if access to memory range is OK, 0 otherwise */
int _access_ok(unsigned long addr, unsigned long size)
{
	/* An empty range is trivially accessible. */
	if (size == 0)
		return 1;
	/* Reject ranges that wrap past the top of the address space. */
	if (addr > (addr + size))
		return 0;
	/* Kernel-mode callers may access anything. */
	if (segment_eq(get_fs(), KERNEL_DS))
		return 1;
#ifdef CONFIG_MTD_UCLINUX
	if (addr >= memory_start && (addr + size) <= memory_end)
		return 1;
	if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end)
		return 1;

#ifdef CONFIG_ROMFS_ON_MTD
	/* For XIP, allow user space to use pointers within the ROMFS.  */
	if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end)
		return 1;
#endif
#else
	if (addr >= memory_start && (addr + size) <= physical_mem_end)
		return 1;
#endif
	/* The (freed) init section is fair game as well. */
	if (addr >= (unsigned long)__init_begin &&
	    addr + size <= (unsigned long)__init_end)
		return 1;
	/* On-chip L1/L2 memories, excluding the parts the kernel itself
	 * occupies (code/data up to the _e*_l1 / _e*_l2 symbols). */
	if (addr >= get_l1_scratch_start()
	    && addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH)
		return 1;
#if L1_CODE_LENGTH != 0
	if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1)
	    && addr + size <= get_l1_code_start() + L1_CODE_LENGTH)
		return 1;
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1)
	    && addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH)
		return 1;
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1)
	    && addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH)
		return 1;
#endif
#if L2_LENGTH != 0
	if (addr >= L2_START + (_ebss_l2 - _stext_l2)
	    && addr + size <= L2_START + L2_LENGTH)
		return 1;
#endif
	return 0;
}
EXPORT_SYMBOL(_access_ok);
#endif /* CONFIG_ACCESS_CHECK */