/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

/*
 * If set, this is used for preparing the system to power off.
 */

void (*pm_power_off_prepare)(void);

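/*
 * set_one_prio - apply a nice value to a single task after the uid and
 * LSM permission checks; shared by the sys_setpriority() cases below.
 */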
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
		p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (who)
				p = find_task_by_vpid(who);
			else
				p = current;
			if (p)
				error = set_one_prio(p, niceval, error);
			break;
		case PRIO_PGRP:
			if (who)
				pgrp = find_vpid(who);
			else
				pgrp = task_pgrp(current);
			do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
				error = set_one_prio(p, niceval, error);
			} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) && !(user = find_user(who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who)
					error = set_one_prio(p, niceval, error);
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* For find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (who)
				p = find_task_by_vpid(who);
			else
				p = current;
			if (p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
			break;
		case PRIO_PGRP:
			if (who)
				pgrp = find_vpid(who);
			else
				pgrp = task_pgrp(current);
			do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) && !(user = find_user(who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who) {
					niceval = 20 - task_nice(p);
					if (niceval > retval)
						retval = niceval;
				}
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* for find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}

/**
 *	emergency_restart - reboot the system
 *
 *	Without shutting down any hardware or taking any locks
 *	reboot the system.  This is called when we know we are in
 *	trouble so this is our best effort to reboot.  This is
 *	safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

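/*
 * Notify everyone on the reboot notifier chain and shut devices down
 * before the machine is restarted.
 */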
static void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
	sysdev_shutdown();
}

/**
 *	kernel_restart - reboot the system
 *	@cmd: pointer to buffer containing command to execute for restart
 *		or %NULL
 *
 *	Shutdown everything and perform a clean reboot.
 *	This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);

/**
 *	kernel_kexec - reboot the system
 *
 *	Move into place and start executing a preloaded standalone
 *	executable.  If nothing was preloaded return an error.
 */
static void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image)
		return;
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}

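/*
 * Common preparation for halt and power-off: run the reboot notifiers
 * with the matching event and shut devices down.
 */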
static void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
	system_state = state;
	device_shutdown();
}
/**
 *	kernel_halt - halt the system
 *
 *	Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	sysdev_shutdown();
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);

/**
 *	kernel_power_off - power_off the system
 *
 *	Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	disable_nonboot_cpus();
	sysdev_shutdown();
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	                magic2 != LINUX_REBOOT_MAGIC2A &&
			magic2 != LINUX_REBOOT_MAGIC2B &&
	                magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = hibernate();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}

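/* Run from a workqueue so the Ctrl-Alt-Del reboot happens in process context. */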
static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}
	
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs. 
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid==rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else
			return -EPERM;
	}
	if (new_egid != old_egid) {
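		/*
		 * The credentials are changing, so treat the task like a
		 * setid binary: reset dumpable and order the update before
		 * the id writes below.
		 */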
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS 
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	} else if ((gid == current->gid) || (gid == current->sgid)) {
		if (old_egid != gid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	}
	else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}
  
static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(current->nsproxy->user_ns, new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
				current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
			new_user != current->nsproxy->user_ns->root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs. 
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}


		
/*
 * setuid() is implemented like SysV with SAVED_IDS 
 * 
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal 
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.  
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;
	
	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		set_dumpable(current->mm, suid_dumpable);
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}


/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;

	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid || 
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		current->fsuid = uid;
	}

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;
}

/*
 * Same as above, but for the filesystem gid.
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid || 
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			set_dumpable(current->mm, suid_dumpable);
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
	}
	return old_fsgid;
}

asmlinkage long sys_times(struct tms __user * tbuf)
{
	/*
	 *	In the SMP world we might just be unlucky and have one of
	 *	the times increment as we use it. Since the value is an
	 *	atomically safe type this is just fine. Conceptually it's
	 *	as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

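		/* Sum the group totals plus each live thread's time under siglock. */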
		spin_lock_irq(&tsk->sighand->siglock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

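	/*
	 * A process may move its own child as long as the child shares its
	 * session and has not exec'ed; otherwise it may only move itself.
	 */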
	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp) {
		detach_pid(p, PIDTYPE_PGID);
		attach_pid(p, PIDTYPE_PGID, pgrp);
		set_task_pgrp(p, pid_nr(pgrp));
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}

asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid)
		return task_pgrp_vnr(current);
	else {
		int retval;
		struct task_struct *p;
		struct pid_namespace *ns;

		ns = current->nsproxy->pid_ns;

		read_lock(&tasklist_lock);
		p = find_task_by_pid_ns(pid, ns);
		retval = -ESRCH;
		if (p) {
			retval = security_task_getpgid(p);
			if (!retval)
				retval = task_pgrp_nr_ns(p, ns);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return task_pgrp_vnr(current);
}

#endif

asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid)
		return task_session_vnr(current);
	else {
		int retval;
		struct task_struct *p;
		struct pid_namespace *ns;

		ns = current->nsproxy->pid_ns;

		read_lock(&tasklist_lock);
		p = find_task_by_pid_ns(pid, ns);
		retval = -ESRCH;
		if (p) {
			retval = security_task_getsid(p);
			if (!retval)
				retval = task_session_nr_ns(p, ns);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	pid_t session;
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);

	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	session = group_leader->pid;
	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 *
	 * Don't check if session id == 1 because kernel threads use this
	 * session id and so the check will always fail and make it so
	 * init cannot successfully call setsid.
	 */
	if (session > 1 && find_task_by_pid_type_ns(PIDTYPE_PGID,
				session, &init_pid_ns))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(session, session);

	spin_lock(&group_leader->sighand->siglock);
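	/* A new session starts with no controlling terminal. */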
	group_leader->signal->tty = NULL;
	spin_unlock(&group_leader->sighand->siglock);

	err = task_pgrp_vnr(group_leader);
out:
	write_unlock_irq(&tasklist_lock);
	return err;
}

/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
    struct group_info *group_info)
{
	int i;
	unsigned int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
		unsigned int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist, group_info->blocks[i], len))
			return -EFAULT;

		grouplist += NGROUPS_PER_BLOCK;
		count -= cp_count;
	}
	return 0;
}

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
    gid_t __user *grouplist)
{
	int i;
	unsigned int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
		unsigned int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist, len))
			return -EFAULT;

		grouplist += NGROUPS_PER_BLOCK;
		count -= cp_count;
	}
	return 0;
}

/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
				    GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}

/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	unsigned int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		unsigned int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}

/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 *	SMP: Nobody else can change our grouplist. Thus we are
	 *	safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	return i;
}

/*
 *	SMP: Our groups are copy-on-write. We can set them safely
 *	without another task interfering.
 */
 
asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}

/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid)
		retval = groups_search(current->group_info, grp);
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);

DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->nodename, tmp, len);
		utsname()->nodename[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	i = 1 + strlen(utsname()->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, utsname()->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(utsname()->domainname, tmp, len);
		utsname()->domainname[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */
 
asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
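	/*
	 * The old interface reports limits as signed 32-bit values;
	 * clamp so RLIM_INFINITY does not come back negative.
	 */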
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
}

#endif

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	unsigned long it_prof_secs;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
		/*
		 * The caller is asking for an immediate RLIMIT_CPU
		 * expiry.  But we use the zero value to mean "it was
		 * never set".  So let's cheat and make it one second
		 * instead
		 */
		new_rlim.rlim_cur = 1;
	}

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource != RLIMIT_CPU)
		goto out;

	/*
	 * RLIMIT_CPU handling.   Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (new_rlim.rlim_cur == RLIM_INFINITY)
		goto out;

	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
		unsigned long rlim_cur = new_rlim.rlim_cur;
		cputime_t cputime;

		cputime = secs_to_cputime(rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}
out:
	return 0;
}

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded and
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, the write memory barrier is implied in __exit_signal,
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 *
 */

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	rcu_read_lock();
	if (!lock_task_sighand(p, &flags)) {
		rcu_read_unlock();
		return;
	}

	switch (who) {
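		/* RUSAGE_BOTH adds the children's totals, then falls through
		 * to include the caller's own usage. */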
		case RUSAGE_BOTH:
		case RUSAGE_CHILDREN:
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			r->ru_inblock = p->signal->cinblock;
			r->ru_oublock = p->signal->coublock;

			if (who == RUSAGE_CHILDREN)
				break;

		case RUSAGE_SELF:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			r->ru_inblock += p->signal->inblock;
			r->ru_oublock += p->signal->oublock;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				r->ru_inblock += task_io_get_inblock(t);
				r->ru_oublock += task_io_get_oublock(t);
				t = next_thread(t);
			} while (t != p);
			break;

		default:
			BUG();
	}

	unlock_task_sighand(p, &flags);
	rcu_read_unlock();

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}

asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}

asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error)
		return error;

	switch (option) {
		case PR_SET_PDEATHSIG:
			if (!valid_signal(arg2)) {
				error = -EINVAL;
				break;
			}
			current->pdeath_signal = arg2;
			break;
		case PR_GET_PDEATHSIG:
			error = put_user(current->pdeath_signal, (int __user *)arg2);
			break;
		case PR_GET_DUMPABLE:
			error = get_dumpable(current->mm);
			break;
		case PR_SET_DUMPABLE:
			if (arg2 < 0 || arg2 > 1) {
				error = -EINVAL;
				break;
			}
			set_dumpable(current->mm, arg2);
			break;

		case PR_SET_UNALIGN:
			error = SET_UNALIGN_CTL(current, arg2);
			break;
		case PR_GET_UNALIGN:
			error = GET_UNALIGN_CTL(current, arg2);
			break;
		case PR_SET_FPEMU:
			error = SET_FPEMU_CTL(current, arg2);
			break;
		case PR_GET_FPEMU:
			error = GET_FPEMU_CTL(current, arg2);
			break;
		case PR_SET_FPEXC:
			error = SET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_FPEXC:
			error = GET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_TIMING:
			error = PR_TIMING_STATISTICAL;
			break;
		case PR_SET_TIMING:
			if (arg2 == PR_TIMING_STATISTICAL)
				error = 0;
			else
				error = -EINVAL;
			break;

		case PR_GET_KEEPCAPS:
			if (current->keep_capabilities)
				error = 1;
			break;
		case PR_SET_KEEPCAPS:
			if (arg2 != 0 && arg2 != 1) {
				error = -EINVAL;
				break;
			}
			current->keep_capabilities = arg2;
			break;
		case PR_SET_NAME: {
			struct task_struct *me = current;
			unsigned char ncomm[sizeof(me->comm)];

			ncomm[sizeof(me->comm)-1] = 0;
			if (strncpy_from_user(ncomm, (char __user *)arg2,
						sizeof(me->comm)-1) < 0)
				return -EFAULT;
			set_task_comm(me, ncomm);
			return 0;
		}
		case PR_GET_NAME: {
			struct task_struct *me = current;
			unsigned char tcomm[sizeof(me->comm)];

			get_task_comm(tcomm, me);
			if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
				return -EFAULT;
			return 0;
		}
		case PR_GET_ENDIAN:
			error = GET_ENDIAN(current, arg2);
			break;
		case PR_SET_ENDIAN:
			error = SET_ENDIAN(current, arg2);
			break;

		case PR_GET_SECCOMP:
			error = prctl_get_seccomp();
			break;
		case PR_SET_SECCOMP:
			error = prctl_set_seccomp(arg2);
			break;

		case PR_CAPBSET_READ:
			if (!cap_valid(arg2))
				return -EINVAL;
			return !!cap_raised(current->cap_bset, arg2);
		case PR_CAPBSET_DROP:
#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
			return cap_prctl_drop(arg2);
#else
			return -EINVAL;
#endif

		default:
			error = -EINVAL;
			break;
	}
	return error;
}

asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
			   struct getcpu_cache __user *unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();
	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}

char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";

static void argv_cleanup(char **argv, char **envp)
{
	argv_free(argv);
}

/**
 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails, it will force an immediate shutdown.
 */
int orderly_poweroff(bool force)
{
	int argc;
	char **argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	int ret = -ENOMEM;
	struct subprocess_info *info;

	if (argv == NULL) {
		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
		       __func__, poweroff_cmd);
		goto out;
	}

	info = call_usermodehelper_setup(argv[0], argv, envp);
	if (info == NULL) {
		argv_free(argv);
		goto out;
	}

	call_usermodehelper_setcleanup(info, argv_cleanup);

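	/* Don't wait for the helper: this may be called from atomic context. */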
	ret = call_usermodehelper_exec(info, UMH_NO_WAIT);

  out:
	if (ret && force) {
		printk(KERN_WARNING "Failed to start orderly shutdown: "
		       "forcing the issue\n");

		/* I guess this should try to kick off some daemon to
		   sync and poweroff asap.  Or not even bother syncing
		   if we're doing an emergency shutdown? */
		emergency_sync();
		kernel_power_off();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(orderly_poweroff);