/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013  Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/irq.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
#include <asm/irq_regs.h>

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Idle-loop hook for a CPU that has been taken offline: hand control
 * to the platform's play_dead() implementation.
 */
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

/*
 * Assembly return paths installed as the initial $ra (reg31) of newly
 * created threads — see copy_thread_tls().
 */
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

/*
 * Prepare the register state for a thread about to begin executing in
 * user mode: drop kernel/coprocessor privileges, discard any live
 * FPU/MSA/DSP context, and point the saved EPC and user $sp at the
 * new program image.
 */
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	/* The new image starts without any live FPU/MSA context. */
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
	/* No delay slot emulation frame belongs to the new image. */
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
	init_dsp();
	/* Resume at the new entry point with the new user stack. */
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

80 81 82 83 84 85 86 87 88 89
void exit_thread(struct task_struct *tsk)
{
	/*
	 * User threads may have allocated a delay slot emulation frame.
	 * If so, clean up that allocation.
	 */
	if (!(current->flags & PF_KTHREAD))
		dsemul_thread_cleanup(tsk);
}

/*
 * Duplicate the architecture-specific task state from @src into @dst
 * during fork.  Returns 0.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	/* Copy the now up-to-date parent context wholesale. */
	*dst = *src;
	return 0;
}

/*
 * Copy architecture-specific thread state.
 *
 * Sets up the child's kernel stack and saved register state so the
 * scheduler can switch to it.  For kernel threads @usp is the function
 * to call and @kthread_arg its argument; for user threads @usp is the
 * new user stack pointer (when non-zero) and @tls the new TLS pointer
 * when CLONE_SETTLS is set.  Returns 0.
 */
int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
	unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	/* Top of the child's kernel stack (32-byte reserved area above). */
	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/*  Put the stack after the struct pt_regs.  */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = kthread_arg;
		/* First switch-in runs ret_from_kernel_thread on childksp. */
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0; /* Clear error flag */
	childregs->regs[2] = 0; /* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	/* First switch-in runs ret_from_fork on the child's pt_regs. */
	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	/* The child does not inherit a delay slot emulation frame. */
	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = tls;

	return 0;
}

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
/* Stack canary value compared on function exit by -fstack-protector code. */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Description of a function's prologue, used by the stack unwinder.
 */
struct mips_frame_info {
	void		*func;		/* start of the function's text */
	unsigned long	func_size;	/* bytes of the function to scan */
	int		frame_size;	/* stack frame size in bytes */
	int		pc_offset;	/* $ra save slot (in ulongs), -1 if none */
};

/*
 * Absolute target of a j/jal: the top four bits of the current pc
 * combined with the instruction's 26-bit index shifted left by two.
 */
#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

/*
 * Return non-zero when *ip is an instruction that saves $ra to the
 * stack; on success *poff is set to the save slot's offset from $sp,
 * counted in ulong-sized units.
 */
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jradiussp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		switch (ip->mm16_r5_format.opcode) {
		case mm_swsp16_op:
			if (ip->mm16_r5_format.rt != 31)
				return 0;

			*poff = ip->mm16_r5_format.imm;
			*poff = (*poff << 2) / sizeof(ulong);
			return 1;

		case mm_pool16c_op:
			switch (ip->mm16_m_format.func) {
			case mm_swm16_op:
				*poff = ip->mm16_m_format.imm;
				*poff += 1 + ip->mm16_m_format.rlist;
				*poff = (*poff << 2) / sizeof(ulong);
				return 1;

			default:
				return 0;
			}

		default:
			return 0;
		}
	}

	switch (ip->i_format.opcode) {
	case mm_sw32_op:
		if (ip->i_format.rs != 29)
			return 0;
		if (ip->i_format.rt != 31)
			return 0;

		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;

	case mm_pool32b_op:
		switch (ip->mm_m_format.func) {
		case mm_swm32_func:
			/* reglist must include $ra (rd >= 0x10) */
			if (ip->mm_m_format.rd < 0x10)
				return 0;
			if (ip->mm_m_format.base != 29)
				return 0;

			*poff = ip->mm_m_format.simmediate;
			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
			*poff /= sizeof(ulong);
			return 1;
		default:
			return 0;
		}

	default:
		return 0;
	}
#else
	/* sw / sd $ra, offset($sp) */
	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 && ip->i_format.rt == 31) {
		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;
	}

	return 0;
#endif
}

/*
 * Return non-zero when *ip is a jump or jump-and-link instruction
 * (register or immediate form).
 */
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalr16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
			return 1;
		return 0;
	}

	if (ip->j_format.opcode == mm_j32_op)
		return 1;
	if (ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
			ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

/*
 * Return non-zero when *ip adjusts $sp by an immediate (the frame
 * allocation in a prologue); on success *frame_size is set to the
 * frame size in bytes (positive for an allocation).
 */
static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
	unsigned short tmp;

	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jradiussp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
		    ip->mm16_r3_format.simmediate & mm_addiusp_func) {
			tmp = ip->mm_b0_format.simmediate >> 1;
			tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
			if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
				tmp ^= 0x100;
			*frame_size = -(signed short)(tmp << 2);
			return 1;
		}
		if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
		    ip->mm16_r5_format.rt == 29) {
			tmp = ip->mm16_r5_format.imm >> 1;
			*frame_size = -(signed short)(tmp & 0xf);
			return 1;
		}
		return 0;
	}

	if (ip->mm_i_format.opcode == mm_addiu32_op &&
	    ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;

	if (ip->i_format.opcode == addiu_op ||
	    ip->i_format.opcode == daddiu_op) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#endif
	return 0;
}

/*
 * Scan the prologue of info->func (up to func_size bytes, at most 128
 * instructions) to determine the function's stack frame size and the
 * slot in which it saves $ra.
 *
 * Returns 0 for a nested (non-leaf) function whose frame was fully
 * analyzed, 1 for a leaf function (no $ra save found), and -1 when the
 * prologue cannot be analyzed.
 */
static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip, *ip_end;
	const unsigned int max_insns = 128;
	unsigned int last_insn_size = 0;
	unsigned int i;
	bool saw_jump = false;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	ip_end = (void *)ip + info->func_size;

	for (i = 0; i < max_insns && ip < ip_end; i++) {
		ip = (void *)ip + last_insn_size;
		/* Decode 16-bit vs 32-bit microMIPS, or plain MIPS words. */
		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.word = ip->halfword[0] << 16;
			last_insn_size = 2;
		} else if (is_mmips) {
			insn.word = ip->halfword[0] << 16 | ip->halfword[1];
			last_insn_size = 4;
		} else {
			insn.word = ip->word;
			last_insn_size = 4;
		}

		if (!info->frame_size) {
			is_sp_move_ins(&insn, &info->frame_size);
			continue;
		} else if (!saw_jump && is_jump_ins(ip)) {
			/*
			 * If we see a jump instruction, we are finished
			 * with the frame save.
			 *
			 * Some functions can have a shortcut return at
			 * the beginning of the function, so don't start
			 * looking for jump instruction until we see the
			 * frame setup.
			 *
			 * The RA save instruction can get put into the
			 * delay slot of the jump instruction, so look
			 * at the next instruction, too.
			 */
			saw_jump = true;
			continue;
		}
		if (info->pc_offset == -1 &&
		    is_ra_save_ins(&insn, &info->pc_offset))
			break;
		if (saw_jump)
			break;
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

433 434
static struct mips_frame_info schedule_mfi __read_mostly;

435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454
#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

L
Linus Torvalds 已提交
455 456
static int __init frame_info_init(void)
{
457
	unsigned long size = 0;
458
#ifdef CONFIG_KALLSYMS
459
	unsigned long ofs;
460 461
#endif
	unsigned long addr;
462

463 464 465 466 467 468
	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
469
#endif
470
	schedule_mfi.func = (void *)addr;
471 472 473
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);
474 475 476 477 478

	/*
	 * Without schedule() frame info, result given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
479
	if (schedule_mfi.pc_offset < 0)
480
		printk("Can't analyze schedule() prologue at %p\n", schedule);
481

L
Linus Torvalds 已提交
482 483 484 485 486 487 488 489 490 491 492 493 494 495 496
	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* New born processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	/* Without schedule() frame info the saved PC cannot be located. */
	if (schedule_mfi.pc_offset < 0)
		return 0;
	/* Read the $ra slot from schedule()'s frame on the thread's stack. */
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


#ifdef CONFIG_KALLSYMS
/*
 * Generic stack unwinding function: given the base of the stack being
 * walked (@stack_page) plus the current *sp/pc/*ra, return the caller's
 * pc and advance *sp past the current frame (clearing *ra).  Handles
 * the transition from an IRQ stack back onto the interrupted task's
 * kernel stack.  Returns 0 when no further frame can be determined.
 */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	unsigned long low, high, irq_stack_high;
	struct mips_frame_info info;
	unsigned long size, ofs;
	struct pt_regs *regs;
	int leaf;

	if (!stack_page)
		return 0;

	/*
	 * IRQ stacks start at IRQ_STACK_START
	 * task stacks at THREAD_SIZE - 32
	 */
	low = stack_page;
	if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
		high = stack_page + IRQ_STACK_START;
		irq_stack_high = high;
	} else {
		high = stack_page + THREAD_SIZE - 32;
		irq_stack_high = 0;
	}

	/*
	 * If we reached the top of the interrupt stack, start unwinding
	 * the interrupted task stack.
	 */
	if (unlikely(*sp == irq_stack_high)) {
		unsigned long task_sp = *(unsigned long *)*sp;

		/*
		 * Check that the pointer saved in the IRQ stack head points to
		 * something within the stack of the current task
		 */
		if (!object_is_on_stack((void *)task_sp))
			return 0;

		/*
		 * Follow pointer to tasks kernel stack frame where interrupted
		 * state was saved.
		 */
		regs = (struct pt_regs *)task_sp;
		pc = regs->cp0_epc;
		if (!user_mode(regs) && __kernel_text_address(pc)) {
			*sp = regs->regs[29];
			*ra = regs->regs[31];
			return pc;
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	/* The whole frame must lie within the stack bounds. */
	if (*sp < low || *sp + info.frame_size > high)
		return 0;

	if (leaf)
		/*
		 * For some extreme cases, get_frame_info() can
		 * consider wrongly a nested function as a leaf
		 * one. In that cases avoid to return always the
		 * same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	/* If *sp lies on some CPU's IRQ stack, unwind within that stack. */
	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}

	/* Otherwise unwind within the task's own kernel stack. */
	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	/* Only meaningful for a sleeping task other than ourselves. */
	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	/* Unwind until we leave the scheduler's own functions. */
	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}
645 646 647 648 649 650 651 652 653 654 655 656

/*
 * Don't forget that the stack pointer must be aligned on a 8 bytes
 * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}
657 658 659 660 661 662 663 664 665 666 667 668 669

static void arch_dump_stack(void *info)
{
	struct pt_regs *regs;

	regs = get_irq_regs();

	if (regs)
		show_regs(regs);

	dump_stack();
}

/*
 * Dump a backtrace on every CPU in @mask (skipping the calling CPU
 * when @exclude_self is set).
 */
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	long this_cpu = get_cpu();

	/*
	 * smp_call_function_many() does not run the callback on the
	 * local CPU, so dump our own stack directly when requested.
	 */
	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
		dump_stack();

	smp_call_function_many(mask, arch_dump_stack, NULL, 1);

	put_cpu();
}
681 682 683 684 685 686 687 688 689 690 691 692 693

int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}

694 695 696 697 698 699 700 701
static void prepare_for_fp_mode_switch(void *info)
{
	struct mm_struct *mm = info;

	if (current->mm == mm)
		lose_fpu(1);
}

/*
 * Switch every thread of @task's process to the FP mode requested by
 * @value (a mask of PR_FP_MODE_FR and PR_FP_MODE_FRE).  Returns 0 on
 * success or -EOPNOTSUPP when the requested mode is invalid or not
 * supported by the hardware.
 */
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	int max_users;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Proceed with the mode switch */
	preempt_disable();

	/* Save FP & vector context, then disable FPU & MSA */
	if (task->signal == current->signal)
		lose_fpu(1);

	/* Prevent any threads from obtaining live FP context */
	atomic_set(&task->mm->context.fp_mode_switching, 1);
	smp_mb__after_atomic();

	/*
	 * If there are multiple online CPUs then force any which are running
	 * threads in this process to lose their FPU context, which they can't
	 * regain until fp_mode_switching is cleared later.
	 */
	if (num_online_cpus() > 1) {
		/* No need to send an IPI for the local CPU */
		max_users = (task->mm == current->mm) ? 1 : 0;

		if (atomic_read(&current->mm->mm_users) > max_users)
			smp_call_function(prepare_for_fp_mode_switch,
					  (void *)current->mm, 1);
	}

	/*
	 * There are now no threads of the process with live FP context, so it
	 * is safe to proceed with the FP mode switch.
	 */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/* Allow threads to use FP again */
	atomic_set(&task->mm->context.fp_mode_switching, 0);
	preempt_enable();

	return 0;
}

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
/*
 * Copy the general purpose registers, hi/lo and key CP0 registers from
 * @regs into the MIPS32_EF_*-indexed array @uregs.
 */
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT
/*
 * Copy the general purpose registers, hi/lo and key CP0 registers from
 * @regs into the MIPS64_EF_*-indexed array @uregs.
 */
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */