/*
	kmod, the new module loader (replaces kerneld)
	Kirk Petersen

	Reorganized not to be a daemon by Adam Richter, with guidance
	from Greg Zornetzer.

	Modified to avoid chroot and file sharing problems.
	Mikael Pettersson

	Limit the concurrent number of kmod modprobes to catch loops from
	"modprobe needs a service that is in a module".
	Keith Owens <kaos@ocs.com.au> December 1999

	Unblock all signals when we exec a usermode process.
	Shuu Yamaguchi <shuu@wondernetworkresources.com> December 2000

	call_usermodehelper wait flag, and remove exec_usermodehelper.
	Rusty Russell <rusty@rustcorp.com.au>  Jan 2003
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/binfmts.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/cred.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <linux/ptrace.h>
#include <linux/async.h>
#include <linux/uaccess.h>

#include <trace/events/module.h>

#define CAP_BSET	(void *)1
#define CAP_PI		(void *)2

static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

#ifdef CONFIG_MODULES
/*
 * Assuming:
 *
 * threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
 *		       (u64) THREAD_SIZE * 8UL);
 *
 * Needing fewer than 50 threads would mean we're dealing with systems
 * smaller than 3200 pages. This assumes you are capable of having ~13M
 * of memory, and this would only be an upper limit, after which the OOM
 * killer would take effect. Systems like these are very unlikely if
 * modules are enabled.
 */
#define MAX_KMOD_CONCURRENT 50
static atomic_t kmod_concurrent_max = ATOMIC_INIT(MAX_KMOD_CONCURRENT);
static DECLARE_WAIT_QUEUE_HEAD(kmod_wq);
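
/*
 * A worked instance of the estimate above (illustrative; assumes
 * PAGE_SIZE = 4 KiB and THREAD_SIZE = 32 KiB, neither of which is
 * guaranteed by this file):
 *
 *	threads = (3200 * 4096) / (32768 * 8) = 50
 *
 * i.e. the ~13M, 3200-page system described above is exactly where the
 * estimate bottoms out at MAX_KMOD_CONCURRENT.
 */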

/*
 * This is a restriction on having *all* MAX_KMOD_CONCURRENT threads
 * running at the same time without returning. When this happens we
 * believe you've somehow ended up with a recursive module dependency
 * creating a loop.
 *
 * We have no option but to fail.
 *
 * Userspace should proactively try to detect and prevent these.
 */
#define MAX_KMOD_ALL_BUSY_TIMEOUT 5

/*
	modprobe_path is set via /proc/sys.
*/
char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";

static void free_modprobe_argv(struct subprocess_info *info)
{
	kfree(info->argv[3]); /* check call_modprobe() */
	kfree(info->argv);
}

static int call_modprobe(char *module_name, int wait)
{
	struct subprocess_info *info;
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};

	char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
	if (!argv)
		goto out;

	module_name = kstrdup(module_name, GFP_KERNEL);
	if (!module_name)
		goto free_argv;

	argv[0] = modprobe_path;
	argv[1] = "-q";
	argv[2] = "--";
	argv[3] = module_name;	/* check free_modprobe_argv() */
	argv[4] = NULL;

	info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
					 NULL, free_modprobe_argv, NULL);
	if (!info)
		goto free_module_name;

	return call_usermodehelper_exec(info, wait | UMH_KILLABLE);

free_module_name:
	kfree(module_name);
free_argv:
	kfree(argv);
out:
	return -ENOMEM;
}
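
/*
 * For reference, the argv built above makes the helper execute the
 * equivalent of:
 *
 *	/sbin/modprobe -q -- <module_name>
 *
 * (with /sbin/modprobe replaced by whatever modprobe_path currently holds).
 */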

/**
 * __request_module - try to load a kernel module
 * @wait: wait (or not) for the operation to complete
 * @fmt: printf style format string for the name of the module
 * @...: arguments as specified in the format string
 *
 * Load a module using the user mode module loader. The function returns
 * zero on success or a negative errno code or positive exit code from
 * "modprobe" on failure. Note that a successful module load does not mean
 * the module did not then unload and exit on an error of its own. Callers
 * must check that the service they requested is now available, not
 * blindly invoke it.
 *
 * If module auto-loading support is disabled then this function
 * becomes a no-operation.
 */
int __request_module(bool wait, const char *fmt, ...)
{
	va_list args;
	char module_name[MODULE_NAME_LEN];
	int ret;

	/*
	 * We don't allow synchronous module loading from async.  Module
	 * init may invoke async_synchronize_full() which will end up
	 * waiting for this task which already is waiting for the module
	 * loading to complete, leading to a deadlock.
	 */
	WARN_ON_ONCE(wait && current_is_async());

	if (!modprobe_path[0])
		return 0;

	va_start(args, fmt);
	ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
	va_end(args);
	if (ret >= MODULE_NAME_LEN)
		return -ENAMETOOLONG;

	ret = security_kernel_module_request(module_name);
	if (ret)
		return ret;

	if (atomic_dec_if_positive(&kmod_concurrent_max) < 0) {
		pr_warn_ratelimited("request_module: kmod_concurrent_max (%u) close to 0 (max_modprobes: %u), for module %s, throttling...",
				    atomic_read(&kmod_concurrent_max),
				    MAX_KMOD_CONCURRENT, module_name);
		ret = wait_event_killable_timeout(kmod_wq,
						  atomic_dec_if_positive(&kmod_concurrent_max) >= 0,
						  MAX_KMOD_ALL_BUSY_TIMEOUT * HZ);
		if (!ret) {
			pr_warn_ratelimited("request_module: modprobe %s cannot be processed, kmod busy with %d threads for more than %d seconds now",
					    module_name, MAX_KMOD_CONCURRENT, MAX_KMOD_ALL_BUSY_TIMEOUT);
			return -ETIME;
		} else if (ret == -ERESTARTSYS) {
			pr_warn_ratelimited("request_module: sigkill sent for modprobe %s, giving up", module_name);
			return ret;
		}
	}

	trace_module_request(module_name, wait, _RET_IP_);

	ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);

	atomic_inc(&kmod_concurrent_max);
	wake_up(&kmod_wq);

	return ret;
}
EXPORT_SYMBOL(__request_module);
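
/*
 * Illustrative caller sketch (not part of this file): most callers use the
 * request_module() wrapper from <linux/kmod.h>, which expands to
 * __request_module(true, ...), and then re-check that the requested
 * service actually appeared, as the comment above demands:
 *
 *	if (request_module("fs-%.*s", len, name) == 0)
 *		fs = lookup_registered_fs(name);  // hypothetical re-check
 */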

#endif /* CONFIG_MODULES */

static void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	kfree(info);
}

static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away
	 * or the caller used UMH_NO_WAIT.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}

/*
 * This is the task which runs the usermode application
 */
static int call_usermodehelper_exec_async(void *data)
{
	struct subprocess_info *sub_info = data;
	struct cred *new;
	int retval;

	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	spin_unlock_irq(&current->sighand->siglock);

	/*
	 * Our parent (unbound workqueue) runs with elevated scheduling
	 * priority. Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	retval = -ENOMEM;
	new = prepare_kernel_cred(current);
	if (!new)
		goto out;

	spin_lock(&umh_sysctl_lock);
	new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
	new->cap_inheritable = cap_intersect(usermodehelper_inheritable,
					     new->cap_inheritable);
	spin_unlock(&umh_sysctl_lock);

	if (sub_info->init) {
		retval = sub_info->init(sub_info, new);
		if (retval) {
			abort_creds(new);
			goto out;
		}
	}

	commit_creds(new);

	retval = do_execve(getname_kernel(sub_info->path),
			   (const char __user *const __user *)sub_info->argv,
			   (const char __user *const __user *)sub_info->envp);
out:
	sub_info->retval = retval;
	/*
	 * call_usermodehelper_exec_sync() will call umh_complete
	 * if UMH_WAIT_PROC.
	 */
	if (!(sub_info->wait & UMH_WAIT_PROC))
		umh_complete(sub_info);
	if (!retval)
		return 0;
	do_exit(0);
}

/* Handles UMH_WAIT_PROC.  */
static void call_usermodehelper_exec_sync(struct subprocess_info *sub_info)
{
	pid_t pid;

	/* If SIGCLD is ignored sys_wait4 won't populate the status. */
	kernel_sigaction(SIGCHLD, SIG_DFL);
	pid = kernel_thread(call_usermodehelper_exec_async, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret = -ECHILD;
		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But call_usermodehelper_exec_sync() always runs as kernel
		 * thread (workqueue) and put_user() to a kernel address works
		 * OK for kernel threads, due to their having an mm_segment_t
		 * which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either call_usermodehelper_exec_async failed and
		 * the real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	/* Restore default kernel sig handler */
	kernel_sigaction(SIGCHLD, SIG_IGN);

	umh_complete(sub_info);
}

/*
 * We need to create the usermodehelper kernel thread from a task that is
 * affine to an optimized set of CPUs (or nohz housekeeping ones) so that
 * it inherits the widest affinity, irrespective of call_usermodehelper()
 * callers with possibly reduced affinity (eg: per-cpu workqueues). We
 * don't want usermodehelper targets to contend with busy CPUs.
 *
 * Unbound workqueues provide such wide affinity and allow blocking on
 * UMH_WAIT_PROC requests without blocking pending requests (up to some
 * limit).
 *
 * Besides, workqueues provide the privilege level that the caller might
 * not have to perform the usermodehelper request.
 */
static void call_usermodehelper_exec_work(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);

	if (sub_info->wait & UMH_WAIT_PROC) {
		call_usermodehelper_exec_sync(sub_info);
	} else {
		pid_t pid;
		/*
		 * Use CLONE_PARENT to reparent it to kthreadd; we do not
		 * want to pollute current->children, and we need a parent
		 * that always ignores SIGCHLD to ensure auto-reaping.
		 */
		pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
				    CLONE_PARENT | SIGCHLD);
		if (pid < 0) {
			sub_info->retval = pid;
			umh_complete(sub_info);
		}
	}
}

/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_disable() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
 * to become 'false'.
 */
static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_disable() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

int usermodehelper_read_trylock(void)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		if (usermodehelper_disabled == UMH_DISABLED)
			ret = -EAGAIN;

		up_read(&umhelper_sem);

		if (ret)
			break;

		schedule();
		try_to_freeze();

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);

long usermodehelper_read_lock_wait(long timeout)
{
	DEFINE_WAIT(wait);

	if (timeout < 0)
		return -EINVAL;

	down_read(&umhelper_sem);
	for (;;) {
		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!usermodehelper_disabled)
			break;

		up_read(&umhelper_sem);

		timeout = schedule_timeout(timeout);
		if (!timeout)
			break;

		down_read(&umhelper_sem);
	}
	finish_wait(&usermodehelper_disabled_waitq, &wait);
	return timeout;
}
EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);

void usermodehelper_read_unlock(void)
{
	up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
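
/*
 * Illustrative read-side usage (hypothetical caller; the firmware loader
 * follows a similar pattern to avoid racing with suspend):
 *
 *	ret = usermodehelper_read_trylock();
 *	if (ret)
 *		return ret;	// -EAGAIN: helpers are disabled
 *	ret = call_usermodehelper_exec(info, UMH_WAIT_EXEC);
 *	usermodehelper_read_unlock();
 */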

/**
 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
 * writing) and wake up tasks waiting for it to change.
 */
void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
{
	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	wake_up(&usermodehelper_disabled_waitq);
	up_write(&umhelper_sem);
}

/**
 * __usermodehelper_disable - Prevent new helpers from being started.
 * @depth: New value to assign to usermodehelper_disabled.
 *
 * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
 */
int __usermodehelper_disable(enum umh_disable_depth depth)
{
	long retval;

	if (!depth)
		return -EINVAL;

	down_write(&umhelper_sem);
	usermodehelper_disabled = depth;
	up_write(&umhelper_sem);

	/*
	 * From now on call_usermodehelper_exec() won't start any new
	 * helpers, so it is sufficient if running_helpers turns out to
	 * be zero at one point (it may be increased later, but that
	 * doesn't matter).
	 */
	retval = wait_event_timeout(running_helpers_waitq,
					atomic_read(&running_helpers) == 0,
					RUNNING_HELPERS_TIMEOUT);
	if (retval)
		return 0;

	__usermodehelper_set_disable_depth(UMH_ENABLED);
	return -EAGAIN;
}
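
/*
 * Sketch of the intended use (illustrative; the PM core drives this through
 * the usermodehelper_disable()/usermodehelper_enable() wrappers declared in
 * <linux/kmod.h>):
 *
 *	error = __usermodehelper_disable(UMH_FREEZING);
 *	if (error)
 *		return error;		// helpers still running after timeout
 *	...freeze user space tasks...
 *	__usermodehelper_set_disable_depth(UMH_DISABLED);
 *	...suspend, then on resume...
 *	__usermodehelper_set_disable_depth(UMH_ENABLED);
 */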

static void helper_lock(void)
{
	atomic_inc(&running_helpers);
	smp_mb__after_atomic();
}

static void helper_unlock(void)
{
	if (atomic_dec_and_test(&running_helpers))
		wake_up(&running_helpers_waitq);
}

/**
 * call_usermodehelper_setup - prepare to call a usermode helper
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @gfp_mask: gfp mask for memory allocation
 * @cleanup: a cleanup function
 * @init: an init function
 * @data: arbitrary context sensitive data
 *
 * Returns either %NULL on allocation failure, or a subprocess_info
 * structure.  This should be passed to call_usermodehelper_exec to
 * exec the process and free the structure.
 *
 * The init function is used to customize the helper process prior to
 * exec.  A non-zero return code causes the process to error out, exit,
 * and return the failure to the calling process.
 *
 * The cleanup function is called just before the subprocess_info is
 * freed.  This can be used for freeing the argv and envp.  The
 * function must be runnable in either a process context or the
 * context in which call_usermodehelper_exec is called.
 */
struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv,
		char **envp, gfp_t gfp_mask,
		int (*init)(struct subprocess_info *info, struct cred *new),
		void (*cleanup)(struct subprocess_info *info),
		void *data)
{
	struct subprocess_info *sub_info;
	sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
	if (!sub_info)
		goto out;

	INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);

#ifdef CONFIG_STATIC_USERMODEHELPER
	sub_info->path = CONFIG_STATIC_USERMODEHELPER_PATH;
#else
	sub_info->path = path;
#endif
	sub_info->argv = argv;
	sub_info->envp = envp;

	sub_info->cleanup = cleanup;
	sub_info->init = init;
	sub_info->data = data;
  out:
	return sub_info;
}
EXPORT_SYMBOL(call_usermodehelper_setup);

/**
 * call_usermodehelper_exec - start a usermode application
 * @sub_info: information about the subprocess
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * Runs a user-space application.  The application is started
 * asynchronously if wait is not set, and runs as a child of system
 * workqueues (i.e. it runs with full root capabilities and optimized
 * affinity).
 */
int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int retval = 0;

	if (!sub_info->path) {
		call_usermodehelper_freeinfo(sub_info);
		return -EINVAL;
	}
	helper_lock();
	if (usermodehelper_disabled) {
		retval = -EBUSY;
		goto out;
	}

	/*
	 * If there is no binary for us to call, then just return and get out of
	 * here.  This allows us to set STATIC_USERMODEHELPER_PATH to "" and
	 * disable all call_usermodehelper() calls.
	 */
	if (strlen(sub_info->path) == 0)
		goto out;

	/*
	 * Set the completion pointer only if there is a waiter.
	 * This makes it possible to use umh_complete to free
	 * the data structure in case of UMH_NO_WAIT.
	 */
	sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
	sub_info->wait = wait;

	queue_work(system_unbound_wq, &sub_info->work);
	if (wait == UMH_NO_WAIT)	/* task has freed sub_info */
		goto unlock;

	if (wait & UMH_KILLABLE) {
		retval = wait_for_completion_killable(&done);
		if (!retval)
			goto wait_done;

		/* umh_complete() will see NULL and free sub_info */
		if (xchg(&sub_info->complete, NULL))
			goto unlock;
		/* fallthrough, umh_complete() was already called */
	}

	wait_for_completion(&done);
wait_done:
	retval = sub_info->retval;
out:
	call_usermodehelper_freeinfo(sub_info);
unlock:
	helper_unlock();
	return retval;
}
EXPORT_SYMBOL(call_usermodehelper_exec);
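
/*
 * Example pairing of the two calls (illustrative; the argv/envp values are
 * made up for this sketch):
 *
 *	char *argv[] = { "/sbin/ifup", "eth0", NULL };
 *	static char *envp[] = { "HOME=/", "PATH=/sbin:/bin", NULL };
 *	struct subprocess_info *info;
 *
 *	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL,
 *					 NULL, NULL, NULL);
 *	if (!info)
 *		return -ENOMEM;
 *	return call_usermodehelper_exec(info, UMH_WAIT_EXEC);
 */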

/**
 * call_usermodehelper() - prepare and start a usermode application
 * @path: path to usermode executable
 * @argv: arg vector for process
 * @envp: environment for process
 * @wait: wait for the application to finish and return status.
 *        when UMH_NO_WAIT don't wait at all, but you get no useful error back
 *        when the program couldn't be exec'ed. This makes it safe to call
 *        from interrupt context.
 *
 * This function is equivalent to calling call_usermodehelper_setup()
 * followed by call_usermodehelper_exec().
 */
int call_usermodehelper(const char *path, char **argv, char **envp, int wait)
{
	struct subprocess_info *info;
	gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;

	info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
					 NULL, NULL, NULL);
	if (info == NULL)
		return -ENOMEM;

	return call_usermodehelper_exec(info, wait);
}
EXPORT_SYMBOL(call_usermodehelper);
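
/*
 * Example (illustrative): run a helper and wait only for the exec to
 * succeed, not for the program to finish:
 *
 *	char *argv[] = { "/usr/bin/logger", "kmod says hello", NULL };
 *	static char *envp[] = { "HOME=/",
 *				"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
 *
 *	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
 */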

static int proc_cap_handler(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
	kernel_cap_t new_cap;
	int err, i;

	if (write && (!capable(CAP_SETPCAP) ||
		      !capable(CAP_SYS_MODULE)))
		return -EPERM;

	/*
	 * convert from the global kernel_cap_t to the ulong array to print to
	 * userspace if this is a read.
	 */
	spin_lock(&umh_sysctl_lock);
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)  {
		if (table->data == CAP_BSET)
			cap_array[i] = usermodehelper_bset.cap[i];
		else if (table->data == CAP_PI)
			cap_array[i] = usermodehelper_inheritable.cap[i];
		else
			BUG();
	}
	spin_unlock(&umh_sysctl_lock);

	t = *table;
	t.data = &cap_array;

	/*
	 * actually read or write an array of ulongs from userspace.  Remember
	 * these are least significant 32 bits first
	 */
	err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos);
	if (err < 0)
		return err;

	/*
	 * convert from the sysctl array of ulongs to the kernel_cap_t
	 * internal representation
	 */
	for (i = 0; i < _KERNEL_CAPABILITY_U32S; i++)
		new_cap.cap[i] = cap_array[i];

	/*
	 * Drop everything not in the new_cap (but don't add things)
	 */
	spin_lock(&umh_sysctl_lock);
	if (write) {
		if (table->data == CAP_BSET)
			usermodehelper_bset = cap_intersect(usermodehelper_bset, new_cap);
		if (table->data == CAP_PI)
			usermodehelper_inheritable = cap_intersect(usermodehelper_inheritable, new_cap);
	}
	spin_unlock(&umh_sysctl_lock);

	return 0;
}

struct ctl_table usermodehelper_table[] = {
	{
		.procname	= "bset",
		.data		= CAP_BSET,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{
		.procname	= "inheritable",
		.data		= CAP_PI,
		.maxlen		= _KERNEL_CAPABILITY_U32S * sizeof(unsigned long),
		.mode		= 0600,
		.proc_handler	= proc_cap_handler,
	},
	{ }
};
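
/*
 * These knobs surface as /proc/sys/kernel/usermodehelper/bset and
 * /proc/sys/kernel/usermodehelper/inheritable. Each holds
 * _KERNEL_CAPABILITY_U32S unsigned longs, least significant 32 bits first,
 * and writes may only drop capability bits. An illustrative root shell
 * session (values invented for the example):
 *
 *	# cat /proc/sys/kernel/usermodehelper/bset
 *	4294967295	63
 *	# echo "4294967295 0" > /proc/sys/kernel/usermodehelper/bset
 *
 * which clears every capability above bit 31 from helpers' bounding set.
 */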