/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#ifdef CONFIG_PPC64
#include <linux/syscalls.h>
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#include <asm/pgtable.h>
#endif

#include "signal.h"

#undef DEBUG_SIG

#ifdef CONFIG_PPC64
#define sys_rt_sigreturn	compat_sys_rt_sigreturn
#define sys_swapcontext	compat_sys_swapcontext
#define sys_sigreturn	compat_sys_sigreturn

#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

#define __save_altstack __compat_save_altstack
/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	compat_sigset_t	cset;

	switch (_NSIG_WORDS) {
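	/*
	 * Each 64-bit word of the sigset is split into two 32-bit compat
	 * words; the cases below fall through deliberately so that every
	 * lower-numbered word gets converted as well.
	 */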
	case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
		cset.sig[7] = set->sig[3] >> 32;
	case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
		cset.sig[5] = set->sig[2] >> 32;
	case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
		cset.sig[3] = set->sig[1] >> 32;
	case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
		cset.sig[1] = set->sig[0] >> 32;
	}
	return copy_to_user(uset, &cset, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	compat_sigset_t s32;

	if (copy_from_user(&s32, uset, sizeof(*uset)))
		return -EFAULT;

	/*
	 * Swap the 2 words of the 64-bit sigset_t (they are stored
	 * in the "wrong" endian in 32-bit user storage).
	 */
	switch (_NSIG_WORDS) {
	case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
	case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
	case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
	case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
	}
	return 0;
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i ++) {
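		/*
		 * If the non-volatile GPRs (r14-r31) were not saved, skip
		 * them and carry on with the registers at PT_NIP.
		 */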
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
				PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}
#endif

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

#ifdef CONFIG_VSX
unsigned long copy_fpr_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* copy the FPRs from the thread_struct into a local buffer, then write it out to userspace */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_FPR(i);
	buf[i] = task->thread.fp_state.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_fpr_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_FPR(i) = buf[i];
	task->thread.fp_state.fpscr = buf[i];

	return 0;
}

unsigned long copy_vsx_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* copy the VSX low halves of VSR 0-31 from the thread_struct into a local buffer, then write it out to userspace */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_vsx_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
unsigned long copy_transact_fpr_to_user(void __user *to,
				  struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* copy the transactional FPRs from the thread_struct into a local buffer, then write it out to userspace */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_TRANS_FPR(i);
	buf[i] = task->thread.transact_fp.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_transact_fpr_from_user(struct task_struct *task,
					  void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_TRANS_FPR(i) = buf[i];
	task->thread.transact_fp.fpscr = buf[i];

	return 0;
}

unsigned long copy_transact_vsx_to_user(void __user *to,
				  struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* copy the transactional VSX low halves of VSR 0-31 into a local buffer, then write it out to userspace */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_transact_vsx_from_user(struct task_struct *task,
					  void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#else
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fp_state.fpr, from,
			      ELF_NFPREG * sizeof(double));
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
inline unsigned long copy_transact_fpr_to_user(void __user *to,
					 struct task_struct *task)
{
	return __copy_to_user(to, task->thread.transact_fp.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
						 void __user *from)
{
	return __copy_from_user(task->thread.transact_fp.fpr, from,
				ELF_NFPREG * sizeof(double));
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
			  struct mcontext __user *tm_frame, int sigret,
			  int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general registers */
	if (save_general_regs(regs, frame))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		__giveup_vsx(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	/* We need to write 0 to the MSR top 32 bits in the tm frame so that we
	 * can check it on the restore to see if TM is active
	 */
	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
		return 1;

	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
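		/* 0x38000000 is the base encoding of "li r0,0" (addi r0,0,0);
		 * adding sigret fills in the immediate field.  0x44000002 is
		 * the "sc" instruction. */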
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static int save_tm_user_regs(struct pt_regs *regs,
			     struct mcontext __user *frame,
			     struct mcontext __user *tm_frame, int sigret)
{
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* Save both sets of general registers */
	if (save_general_regs(&current->thread.ckpt_regs, frame)
	    || save_general_regs(regs, tm_frame))
		return 1;

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		if (msr & MSR_VEC) {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.transact_vr,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		} else {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.vr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		}

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.vrsave,
		       (u32 __user *)&frame->mc_vregs[32]))
		return 1;
	if (msr & MSR_VEC) {
		if (__put_user(current->thread.transact_vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	} else {
		if (__put_user(current->thread.vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	}
#endif /* CONFIG_ALTIVEC */

	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;
	if (msr & MSR_FP) {
		if (copy_transact_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	} else {
		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	}

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		__giveup_vsx(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		if (msr & MSR_VSX) {
			if (copy_transact_vsx_to_user(&tm_frame->mc_vsregs,
						      current))
				return 1;
		} else {
			if (copy_vsx_to_user(&tm_frame->mc_vsregs, current))
				return 1;
		}

		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in save_user_regs().
	 */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
#endif

/*
 * Restore the current user register values from the user stack,
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	regs->trap = 0;
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr/evr.  That way, if we get preempted
	 * and another task grabs the FPU/Altivec/SPE, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_from_user(current, &sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instructions.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore the VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs))
			return 1;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	long err;
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	err = restore_general_regs(regs, tm_sr);
	err |= restore_general_regs(&current->thread.ckpt_regs, sr);

	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);

	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (err)
		return 1;

	/* Restore the previous little-endian mode */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr/evr.  That way, if we get preempted
	 * and another task grabs the FPU/Altivec/SPE, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

#ifdef CONFIG_ALTIVEC
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)) ||
		    __copy_from_user(&current->thread.transact_vr,
				     &tm_sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.transact_vr, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave,
		       (u32 __user *)&sr->mc_vregs[32]) ||
	    __get_user(current->thread.transact_vrsave,
		       (u32 __user *)&tm_sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */

	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
	    copy_transact_fpr_from_user(current, &tm_sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore the VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs) ||
		    copy_transact_vsx_from_user(current, &tm_sr->mc_vsregs))
			return 1;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in restore_user_regs().
	 */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
		       + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread, msr);
	/* Get the top half of the MSR */
	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
		return 1;
	/* Pull in MSR TM from user context */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&current->thread);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&current->thread);
		regs->msr |= MSR_VEC;
	}
#endif

	return 0;
}
#endif

#ifdef CONFIG_PPC64
int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
{
	int err;

	if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 * This routine must convert siginfo from 64bit to 32bit as well
	 * at the same time.
	 */
	err = __put_user(s->si_signo, &d->si_signo);
	err |= __put_user(s->si_errno, &d->si_errno);
	err |= __put_user((short)s->si_code, &d->si_code);
	if (s->si_code < 0)
		err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
				      SI_PAD_SIZE32);
	else switch(s->si_code >> 16) {
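	/* the top 16 bits of si_code identify which union member is valid */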
	case __SI_CHLD >> 16:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		err |= __put_user(s->si_utime, &d->si_utime);
		err |= __put_user(s->si_stime, &d->si_stime);
		err |= __put_user(s->si_status, &d->si_status);
		break;
	case __SI_FAULT >> 16:
		err |= __put_user((unsigned int)(unsigned long)s->si_addr,
				  &d->si_addr);
		break;
	case __SI_POLL >> 16:
		err |= __put_user(s->si_band, &d->si_band);
		err |= __put_user(s->si_fd, &d->si_fd);
		break;
	case __SI_TIMER >> 16:
		err |= __put_user(s->si_tid, &d->si_tid);
		err |= __put_user(s->si_overrun, &d->si_overrun);
		err |= __put_user(s->si_int, &d->si_int);
		break;
	case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
	case __SI_MESGQ >> 16:
		err |= __put_user(s->si_int, &d->si_int);
		/* fallthrough */
	case __SI_KILL >> 16:
	default:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		break;
	}
	return err;
}

#define copy_siginfo_to_user	copy_siginfo_to_user32

int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
	memset(to, 0, sizeof *to);

	if (copy_from_user(to, from, 3*sizeof(int)) ||
	    copy_from_user(to->_sifields._pad,
			   from->_sifields._pad, SI_PAD_SIZE32))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
		siginfo_t *info, sigset_t *oldset,
		struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
		sigret = 0;
		tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(regs->msr)) {
		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
			goto badframe;
	}
	else
#endif
	{
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr)) {
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link)
		    || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs))
			goto badframe;
	}
	else
#endif
		if (__put_user(0, &rt_sf->uc.uc_link))
			goto badframe;

	current->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
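	/* the interrupted r1 stored at newsp becomes the back chain of the
	 * handler's stack frame */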
	addr = (void __user *)regs->gpr[1];
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state:
	 */
	regs->msr &= ~MSR_TS_MASK;
#endif
	return 1;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sigsegv(sig, current);
	return 0;
}

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;

	if (__get_user(cmcp, &ucp->uc_regs) ||
	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;
}
#endif

long sys_swapcontext(struct ucontext __user *old_ctx,
		     struct ucontext __user *new_ctx,
		     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
{
	unsigned char tmp;
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject the new context if it sets the MSR VSX bits but
	 * doesn't provide room for the VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
		    || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	set_thread_flag(TIF_RESTOREALL);
	return 0;
}

long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		     struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
	int tm_restore = 0;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
		goto bad;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* We only recheckpoint on return if we're
			 * in a transaction.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore)
		/* Fall through, for non-TM restore */
#endif
	if (do_setcontext(&rt_sf->uc, regs, 1))
		goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_rt_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}

#ifdef CONFIG_PPC32
int sys_debug_setcontext(struct ucontext __user *ctx,
			 int ndbg, struct sig_dbg_op __user *dbg,
			 int r6, int r7, int r8,
			 struct pt_regs *regs)
{
	struct sig_dbg_op op;
	int i;
	unsigned char tmp;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
	    || __get_user(tmp, (u8 __user *) ctx)
	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV, current);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif

/*
 * OK, we're invoking a handler
 */
int handle_signal32(unsigned long sig, struct k_sigaction *ka,
		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;

	/* Set up Signal Frame */
	frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1);
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(sig, &sc->signal))
		goto badframe;

	if (vdso32_sigtramp && current->mm->context.vdso_base) {
		sigret = 0;
		tramp = current->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		sigret = __NR_sigreturn;
		tramp = (unsigned long) frame->mctx.tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
	if (MSR_TM_ACTIVE(regs->msr)) {
		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
				      sigret))
			goto badframe;
	}
	else
#endif
	{
		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
			goto badframe;
	}

	regs->link = tramp;

	current->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long) ka->sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state:
	 */
	regs->msr &= ~MSR_TS_MASK;
#endif
	return 1;

badframe:
#ifdef DEBUG_SIG
	printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   frame, regs->nip, regs->link);

	force_sigsegv(sig, current);
	return 0;
}

/*
 * Do a signal return; undo the signal stack.
 */
long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		       struct pt_regs *regs)
{
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}