process.c 15.7 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
7
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
L
Linus Torvalds 已提交
8 9
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
10
 * Copyright (C) 2013  Imagination Technologies Ltd.
L
Linus Torvalds 已提交
11 12 13
 */
#include <linux/errno.h>
#include <linux/sched.h>
14
#include <linux/tick.h>
L
Linus Torvalds 已提交
15 16 17 18
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
19
#include <linux/export.h>
L
Linus Torvalds 已提交
20 21 22 23 24 25
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
26
#include <linux/kallsyms.h>
27
#include <linux/random.h>
28
#include <linux/prctl.h>
L
Linus Torvalds 已提交
29

30
#include <asm/asm.h>
L
Linus Torvalds 已提交
31 32
#include <asm/bootinfo.h>
#include <asm/cpu.h>
33
#include <asm/dsp.h>
L
Linus Torvalds 已提交
34
#include <asm/fpu.h>
35
#include <asm/msa.h>
L
Linus Torvalds 已提交
36 37 38
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
A
Alex Smith 已提交
39
#include <asm/reg.h>
L
Linus Torvalds 已提交
40 41 42 43 44
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
45
#include <asm/stacktrace.h>
46
#include <asm/irq_regs.h>
L
Linus Torvalds 已提交
47

T
Thomas Gleixner 已提交
48 49
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Entered from the idle loop once this CPU has been taken offline:
 * hand over to the platform's play_dead() implementation.
 */
void arch_cpu_idle_dead(void)
{
	/* What the heck is this check doing ? */
	if (!cpu_isset(smp_processor_id(), cpu_callin_map))
		play_dead();
}
#endif
56

L
Linus Torvalds 已提交
57
asmlinkage void ret_from_fork(void);
A
Al Viro 已提交
58
asmlinkage void ret_from_kernel_thread(void);
L
Linus Torvalds 已提交
59 60 61 62 63 64

void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
65
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
L
Linus Torvalds 已提交
66 67 68
	status |= KU_USER;
	regs->cp0_status = status;
	clear_used_math();
69
	clear_fpu_owner();
P
Paul Burton 已提交
70
	init_dsp();
71
	clear_thread_flag(TIF_USEDMSA);
72 73
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	disable_msa();
L
Linus Torvalds 已提交
74 75 76 77 78 79 80 81 82 83 84 85
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

/* Release architecture-specific thread resources: nothing to do on MIPS. */
void exit_thread(void)
{
}

/* Reset arch-specific thread state on exec: nothing to flush here. */
void flush_thread(void)
{
}

A
Alexey Dobriyan 已提交
86
int copy_thread(unsigned long clone_flags, unsigned long usp,
87
	unsigned long arg, struct task_struct *p)
L
Linus Torvalds 已提交
88
{
A
Al Viro 已提交
89
	struct thread_info *ti = task_thread_info(p);
90
	struct pt_regs *childregs, *regs = current_pt_regs();
91
	unsigned long childksp;
R
Ralf Baechle 已提交
92
	p->set_child_tid = p->clear_child_tid = NULL;
L
Linus Torvalds 已提交
93

A
Al Viro 已提交
94
	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
L
Linus Torvalds 已提交
95 96 97

	preempt_disable();

98 99 100
	if (is_msa_enabled())
		save_msa(p);
	else if (is_fpu_owner())
L
Linus Torvalds 已提交
101
		save_fp(p);
102 103 104

	if (cpu_has_dsp)
		save_dsp(p);
L
Linus Torvalds 已提交
105 106 107 108 109

	preempt_enable();

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
110 111
	/*  Put the stack after the struct pt_regs.  */
	childksp = (unsigned long) childregs;
A
Al Viro 已提交
112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	if (unlikely(p->flags & PF_KTHREAD)) {
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}
L
Linus Torvalds 已提交
130
	*childregs = *regs;
R
Ralf Baechle 已提交
131 132
	childregs->regs[7] = 0; /* Clear error flag */
	childregs->regs[2] = 0; /* Child gets zero as return value */
133 134
	if (usp)
		childregs->regs[29] = usp;
A
Al Viro 已提交
135
	ti->addr_limit = USER_DS;
L
Linus Torvalds 已提交
136 137 138 139 140 141 142 143 144 145 146

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
147 148
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
L
Linus Torvalds 已提交
149

R
Ralf Baechle 已提交
150
#ifdef CONFIG_MIPS_MT_FPAFF
151
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
R
Ralf Baechle 已提交
152 153
#endif /* CONFIG_MIPS_MT_FPAFF */

R
Ralf Baechle 已提交
154 155 156
	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

L
Linus Torvalds 已提交
157 158 159
	return 0;
}

160 161 162 163 164 165
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
/* Canary value consulted by -fstack-protector instrumented functions. */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

166 167 168 169 170 171
/*
 * Description of a function prologue, filled in by get_frame_info()
 * and consumed by the stack unwinder.
 */
struct mips_frame_info {
	void		*func;		/* start address of the function */
	unsigned long	func_size;	/* size in bytes; 0 if unknown */
	int		frame_size;	/* stack frame size in bytes */
	int		pc_offset;	/* offset of saved $ra in the frame,
					   in longs; -1 if not found */
};
172

173 174 175
/*
 * Decode the destination of a j/jal instruction: the 26-bit target is
 * shifted left 2 and combined with the top 4 bits of the pc (same
 * 256MB region).
 */
#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

176 177
static inline int is_ra_save_ins(union mips_instruction *ip)
{
178 179 180 181 182 183 184 185 186 187 188 189 190 191
#ifdef CONFIG_CPU_MICROMIPS
	union mips_instruction mmi;

	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jradiussp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->halfword[0])) {
		mmi.word = (ip->halfword[0] << 16);
R
Ralf Baechle 已提交
192 193 194 195
		return (mmi.mm16_r5_format.opcode == mm_swsp16_op &&
			mmi.mm16_r5_format.rt == 31) ||
		       (mmi.mm16_m_format.opcode == mm_pool16c_op &&
			mmi.mm16_m_format.func == mm_swm16_op);
196 197 198 199
	}
	else {
		mmi.halfword[0] = ip->halfword[1];
		mmi.halfword[1] = ip->halfword[0];
R
Ralf Baechle 已提交
200 201 202 203 204 205 206
		return (mmi.mm_m_format.opcode == mm_pool32b_op &&
			mmi.mm_m_format.rd > 9 &&
			mmi.mm_m_format.base == 29 &&
			mmi.mm_m_format.func == mm_swm32_func) ||
		       (mmi.i_format.opcode == mm_sw32_op &&
			mmi.i_format.rs == 29 &&
			mmi.i_format.rt == 31);
207 208
	}
#else
209 210 211 212
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 &&
		ip->i_format.rt == 31;
213
#endif
214 215
}

216
static inline int is_jump_ins(union mips_instruction *ip)
217
{
218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalr16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	union mips_instruction mmi;

	mmi.word = (ip->halfword[0] << 16);

	if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
	    (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
	    ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
			ip->r_format.func != mm_pool32axf_op)
		return 0;
R
Ralf Baechle 已提交
238
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
239
#else
240 241
	if (ip->j_format.opcode == j_op)
		return 1;
242 243 244 245 246
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
247
#endif
248 249 250 251
}

/* Does @ip adjust $sp, i.e. allocate the stack frame in the prologue? */
static inline int is_sp_move_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->halfword[0])) {
		union mips_instruction mmi;

		mmi.word = (ip->halfword[0] << 16);
		/*
		 * Fix: this previously read "simmediate && mm_addiusp_func",
		 * which is always true for any non-zero immediate because
		 * mm_addiusp_func is a non-zero constant.  The intent — as
		 * the matching decode in get_frame_info() shows with
		 * "halfword[0] & mm_addiusp_func" — is a bitwise test of the
		 * ADDIUSP function bit.
		 */
		return (mmi.mm16_r3_format.opcode == mm_pool16d_op &&
			(mmi.mm16_r3_format.simmediate & mm_addiusp_func)) ||
		       (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
			mmi.mm16_r5_format.rt == 29);
	}
	return ip->mm_i_format.opcode == mm_addiu32_op &&
	       ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29;
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;
#endif
	return 0;
}

282
static int get_frame_info(struct mips_frame_info *info)
L
Linus Torvalds 已提交
283
{
284 285 286
#ifdef CONFIG_CPU_MICROMIPS
	union mips_instruction *ip = (void *) (((char *) info->func) - 1);
#else
287
	union mips_instruction *ip = info->func;
288
#endif
289 290
	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
	unsigned i;
291

L
Linus Torvalds 已提交
292
	info->pc_offset = -1;
293
	info->frame_size = 0;
L
Linus Torvalds 已提交
294

295 296 297 298 299 300 301
	if (!ip)
		goto err;

	if (max_insns == 0)
		max_insns = 128U;	/* unknown function size */
	max_insns = min(128U, max_insns);

302 303
	for (i = 0; i < max_insns; i++, ip++) {

304
		if (is_jump_ins(ip))
305
			break;
306 307
		if (!info->frame_size) {
			if (is_sp_move_ins(ip))
308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325
			{
#ifdef CONFIG_CPU_MICROMIPS
				if (mm_insn_16bit(ip->halfword[0]))
				{
					unsigned short tmp;

					if (ip->halfword[0] & mm_addiusp_func)
					{
						tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
						info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
					} else {
						tmp = (ip->halfword[0] >> 1);
						info->frame_size = -(signed short)(tmp & 0xf);
					}
					ip = (void *) &ip->halfword[1];
					ip--;
				} else
#endif
326
				info->frame_size = - ip->i_format.simmediate;
327
			}
328
			continue;
329
		}
330
		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
331 332
			info->pc_offset =
				ip->i_format.simmediate / sizeof(long);
333
			break;
L
Linus Torvalds 已提交
334 335
		}
	}
336 337 338 339 340
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems boggus... */
341
err:
342
	return -1;
L
Linus Torvalds 已提交
343 344
}

345 346
/* Frame info for __schedule(), used by thread_saved_pc() and get_wchan(). */
static struct mips_frame_info schedule_mfi __read_mostly;

347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366
#ifdef CONFIG_KALLSYMS
/* Resolve the address of __schedule() via kallsyms. */
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
/*
 * Without kallsyms: scan the first few instructions of schedule() for a
 * 'j' instruction and decode its target — presumably schedule() jumps
 * straight to __schedule() (TODO: confirm against the schedule() code).
 * Returns 0 if no jump is found.
 */
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

L
Linus Torvalds 已提交
367 368
/*
 * Analyze __schedule()'s prologue at boot so thread_saved_pc() and
 * get_wchan() can later locate a blocked task's saved PC.
 */
static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif

	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;
	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, result given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* New born processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
409
	if (schedule_mfi.pc_offset < 0)
L
Linus Torvalds 已提交
410
		return 0;
411
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
L
Linus Torvalds 已提交
412 413 414
}


415
#ifdef CONFIG_KALLSYMS
416 417 418 419 420
/*
 * Generic stack unwinding function: given a frame (*sp, pc, *ra) on the
 * stack page starting at @stack_page, return the caller's pc and update
 * *sp/*ra to the caller's frame.  Returns 0 when unwinding is impossible
 * or leaves kernel text.
 */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	struct mips_frame_info info;
	unsigned long size, ofs;
	int leaf;
	extern void ret_from_irq(void);
	extern void ret_from_exception(void);

	if (!stack_page)
		return 0;

	/*
	 * If we reached the bottom of interrupt context,
	 * return saved pc in pt_regs.
	 */
	if (pc == (unsigned long)ret_from_irq ||
	    pc == (unsigned long)ret_from_exception) {
		struct pt_regs *regs;

		/* The saved pt_regs must lie fully within the stack page. */
		if (*sp >= stack_page &&
		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
			regs = (struct pt_regs *)*sp;
			pc = regs->cp0_epc;
			if (__kernel_text_address(pc)) {
				*sp = regs->regs[29];
				*ra = regs->regs[31];
				return pc;
			}
		}
		return 0;
	}

	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;

	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	/* Sanity check the frame against the stack page bounds. */
	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;

	if (leaf)
		/*
		 * For some extreme cases, get_frame_info() can
		 * consider wrongly a nested function as a leaf
		 * one. In that cases avoid to return always the
		 * same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	/* Unwind one frame within @task's stack page; see unwind_stack_by_address(). */
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
495
#endif
496 497 498 499 500 501 502 503 504

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	/* Only meaningful for another, currently blocked task. */
	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	/* Unwind past the scheduler frames to the real waiting site. */
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}
525 526 527 528 529 530 531 532 533 534 535 536

/*
 * Don't forget that the stack pointer must be aligned on a 8 bytes
 * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	/* Randomize the sub-page offset unless the task opted out. */
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}
537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553

/* SMP call handler: dump the interrupted context's registers and stack. */
static void arch_dump_stack(void *info)
{
	struct pt_regs *regs;

	regs = get_irq_regs();

	/* regs is NULL when not called from interrupt context. */
	if (regs)
		show_regs(regs);

	dump_stack();
}

/*
 * Dump a backtrace on every online CPU.  smp_call_function() only runs
 * the handler on the *other* CPUs, so when the caller asks for its own
 * CPU to be included we must dump locally as well — the previous code
 * silently ignored @include_self.
 */
void arch_trigger_all_cpu_backtrace(bool include_self)
{
	if (include_self)
		arch_dump_stack(NULL);
	smp_call_function(arch_dump_stack, NULL, 1);
}
554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583

/* Implement PR_GET_FP_MODE: report @task's FP register model flags. */
int mips_get_process_fp_mode(struct task_struct *task)
{
	int mode = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		mode |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		mode |= PR_FP_MODE_FRE;

	return mode;
}

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	unsigned long switch_count;
	struct task_struct *t;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
	    !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

584 585 586 587
	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648
	/* Save FP & vector context, then disable FPU & MSA */
	if (task->signal == current->signal)
		lose_fpu(1);

	/* Prevent any threads from obtaining live FP context */
	atomic_set(&task->mm->context.fp_mode_switching, 1);
	smp_mb__after_atomic();

	/*
	 * If there are multiple online CPUs then wait until all threads whose
	 * FP mode is about to change have been context switched. This approach
	 * allows us to only worry about whether an FP mode switch is in
	 * progress when FP is first used in a tasks time slice. Pretty much all
	 * of the mode switch overhead can thus be confined to cases where mode
	 * switches are actually occuring. That is, to here. However for the
	 * thread performing the mode switch it may take a while...
	 */
	if (num_online_cpus() > 1) {
		spin_lock_irq(&task->sighand->siglock);

		for_each_thread(task, t) {
			if (t == current)
				continue;

			switch_count = t->nvcsw + t->nivcsw;

			do {
				spin_unlock_irq(&task->sighand->siglock);
				cond_resched();
				spin_lock_irq(&task->sighand->siglock);
			} while ((t->nvcsw + t->nivcsw) == switch_count);
		}

		spin_unlock_irq(&task->sighand->siglock);
	}

	/*
	 * There are now no threads of the process with live FP context, so it
	 * is safe to proceed with the FP mode switch.
	 */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/* Allow threads to use FP again */
	atomic_set(&task->mm->context.fp_mode_switching, 0);

	return 0;
}