/*
 * Network namespace management (presumably net/core/net_namespace.c —
 * recovered from a scraped copy; blame/viewer artifacts removed).
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

3 4 5 6 7 8
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
9
#include <linux/sched.h>
10
#include <linux/idr.h>
11
#include <linux/rculist.h>
12
#include <linux/nsproxy.h>
13 14
#include <linux/fs.h>
#include <linux/proc_ns.h>
15
#include <linux/file.h>
16
#include <linux/export.h>
17
#include <linux/user_namespace.h>
18
#include <net/net_namespace.h>
19
#include <net/netns/generic.h>
20 21 22 23 24 25 26 27 28 29

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
static DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
A
Alexey Dobriyan 已提交
30
EXPORT_SYMBOL_GPL(net_namespace_list);
31

32 33 34
struct net init_net = {
	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
35
EXPORT_SYMBOL(init_net);
36

37 38
#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

E
Eric Dumazet 已提交
39 40 41 42 43 44 45 46 47 48 49 50 51 52
/* Largest generic-pointer slot handed out so far; only grows (see
 * register_pernet_operations()). */
static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

/*
 * Allocate a zeroed net_generic array big enough for the current
 * maximum number of generic pointers.  Returns NULL on failure.
 */
static struct net_generic *net_alloc_generic(void)
{
	size_t sz = offsetof(struct net_generic, ptr[max_gen_ptrs]);
	struct net_generic *ng = kzalloc(sz, GFP_KERNEL);

	if (ng)
		ng->len = max_gen_ptrs;

	return ng;
}

53 54 55 56 57 58 59
static int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

E
Eric Dumazet 已提交
60 61 62
	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	ng = old_ng;
63 64 65
	if (old_ng->len >= id)
		goto assign;

E
Eric Dumazet 已提交
66
	ng = net_alloc_generic();
67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside rcu
	 * read section. Besides once set the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));

	rcu_assign_pointer(net->gen, ng);
84
	kfree_rcu(old_ng, rcu);
85 86 87 88 89
assign:
	ng->ptr[id - 1] = data;
	return 0;
}

90 91
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
92 93 94
	int err = -ENOMEM;
	void *data = NULL;

95
	if (ops->id && ops->size) {
96
		data = kzalloc(ops->size, GFP_KERNEL);
97
		if (!data)
98
			goto out;
99 100

		err = net_assign_generic(net, *ops->id, data);
101 102
		if (err)
			goto cleanup;
103
	}
104
	err = 0;
105
	if (ops->init)
106 107 108 109 110 111 112 113 114
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
115 116 117 118 119 120 121 122 123 124
}

/*
 * Release the generic data ops_init() allocated for @ops in @net;
 * a no-op for operations without an id/size.
 */
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size)
		kfree(net_generic(net, *ops->id));
}

125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146
/*
 * Invoke @ops's exit hooks for every namespace on @net_exit_list:
 * the per-net ->exit for each entry first, then the batched
 * ->exit_batch once for the whole list.
 */
static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

/* Free @ops's generic data for every namespace on @net_exit_list. */
static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

147 148 149
/*
 * setup_net runs the initializers for the network namespace object.
 */
150
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
151 152
{
	/* Must be called with net_mutex held */
153
	const struct pernet_operations *ops, *saved_ops;
154
	int error = 0;
155
	LIST_HEAD(net_exit_list);
156 157

	atomic_set(&net->count, 1);
158
	atomic_set(&net->passive, 1);
159
	net->dev_base_seq = 1;
160
	net->user_ns = user_ns;
161

162
#ifdef NETNS_REFCNT_DEBUG
163
	atomic_set(&net->use_count, 0);
164
#endif
165

166
	list_for_each_entry(ops, &pernet_list, list) {
167 168 169
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
170 171 172
	}
out:
	return error;
173

174 175 176 177
out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
178
	list_add(&net->exit_list, &net_exit_list);
179
	saved_ops = ops;
180 181 182
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

183 184
	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
185
		ops_free_list(ops, &net_exit_list);
186 187

	rcu_barrier();
188 189 190
	goto out;
}

191

192 193 194 195
#ifdef CONFIG_NET_NS
/* Slab for struct net, and the workqueue that runs cleanup_net(). */
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

/*
 * Allocate a zeroed struct net with a fresh net_generic array already
 * attached to net->gen.  Returns NULL if either allocation fails.
 */
static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng = net_alloc_generic();

	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
220
#ifdef NETNS_REFCNT_DEBUG
221
	if (unlikely(atomic_read(&net->use_count) != 0)) {
J
Joe Perches 已提交
222 223
		pr_emerg("network namespace not free! Usage: %d\n",
			 atomic_read(&net->use_count));
224 225
		return;
	}
226
#endif
227
	kfree(net->gen);
228 229 230
	kmem_cache_free(net_cachep, net);
}

231 232 233 234 235 236 237
/*
 * Drop one "passive" reference on the namespace @p; frees it when the
 * last passive reference goes away.  Tolerates a NULL argument.
 */
void net_drop_ns(void *p)
{
	struct net *ns = p;

	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}

238 239
struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
240
{
241 242
	struct net *net;
	int rv;
243

244 245 246
	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

247 248 249
	net = net_alloc();
	if (!net)
		return ERR_PTR(-ENOMEM);
250 251 252

	get_user_ns(user_ns);

253
	mutex_lock(&net_mutex);
254
	rv = setup_net(net, user_ns);
255
	if (rv == 0) {
256
		rtnl_lock();
257
		list_add_tail_rcu(&net->list, &net_namespace_list);
258 259
		rtnl_unlock();
	}
260
	mutex_unlock(&net_mutex);
261
	if (rv < 0) {
262
		put_user_ns(user_ns);
263
		net_drop_ns(net);
264 265 266 267
		return ERR_PTR(rv);
	}
	return net;
}
268

269 270 271
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

272 273
static void cleanup_net(struct work_struct *work)
{
274
	const struct pernet_operations *ops;
275 276
	struct net *net, *tmp;
	LIST_HEAD(net_kill_list);
277
	LIST_HEAD(net_exit_list);
278

279 280 281 282
	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);
283 284 285 286 287

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
288
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
289
		list_del_rcu(&net->list);
290 291
		list_add_tail(&net->exit_list, &net_exit_list);
	}
292 293
	rtnl_unlock();

294 295 296 297 298 299 300
	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

301
	/* Run all of the network namespace exit methods */
302 303 304
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

305
	/* Free the net generic variables */
306 307
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);
308 309 310 311 312 313 314 315 316

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
317 318
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
319
		put_user_ns(net->user_ns);
320
		net_drop_ns(net);
321
	}
322
}
323
static DECLARE_WORK(net_cleanup_work, cleanup_net);
324 325 326 327

/*
 * Called when the last "count" reference on @net is dropped: queue
 * the namespace so cleanup_net() tears it down in process context.
 */
void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

338 339
struct net *get_net_ns_by_fd(int fd)
{
340
	struct proc_ns *ei;
341 342 343 344
	struct file *file;
	struct net *net;

	file = proc_ns_fget(fd);
345 346
	if (IS_ERR(file))
		return ERR_CAST(file);
347

348
	ei = get_proc_ns(file_inode(file));
349 350 351 352
	if (ei->ns_ops == &netns_operations)
		net = get_net(ei->ns);
	else
		net = ERR_PTR(-EINVAL);
353

354
	fput(file);
355 356 357
	return net;
}

358
#else
359 360 361 362
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
363 364
#endif

365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384
/*
 * Look up the network namespace of the task with (virtual) pid @pid,
 * taking a reference on it.  Returns ERR_PTR(-ESRCH) when the task
 * does not exist or no longer has an nsproxy.
 */
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct net *net = ERR_PTR(-ESRCH);
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy = task_nsproxy(tsk);

		if (nsproxy)
			net = get_net(nsproxy->net_ns);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

385 386 387 388 389 390 391 392 393 394 395 396 397 398 399
/* Give each namespace the /proc inode number that identifies it. */
static __net_init int net_ns_net_init(struct net *net)
{
	return proc_alloc_inum(&net->proc_inum);
}

/* Release the /proc inode number on namespace teardown. */
static __net_exit void net_ns_net_exit(struct net *net)
{
	proc_free_inum(net->proc_inum);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

400 401
/*
 * Boot-time initialisation: create the struct net slab and the
 * cleanup workqueue (CONFIG_NET_NS only), attach a generic array to
 * init_net, run all registered pernet initialisers on it and hash it
 * into net_namespace_list.  Panics on failure — the initial network
 * namespace is not optional.
 */
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	return 0;
}

pure_initcall(net_ns_init);

438
#ifdef CONFIG_NET_NS
/*
 * Add @ops to @list and run its init methods on every existing
 * namespace.  On failure, undo the inits already performed and take
 * @ops off the list again.  Called with net_mutex held.
 */
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);
	struct net *net;
	int error;

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

465
static void __unregister_pernet_operations(struct pernet_operations *ops)
466 467
{
	struct net *net;
468
	LIST_HEAD(net_exit_list);
469 470

	list_del(&ops->list);
471 472 473 474
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
475 476
}

477 478
#else

479 480
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
481
{
482
	return ops_init(ops, &init_net);
483 484
}

485
static void __unregister_pernet_operations(struct pernet_operations *ops)
486
{
487 488 489 490
	LIST_HEAD(net_exit_list);
	list_add(&init_net.exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
491
}
492 493

#endif /* CONFIG_NET_NS */
494

495 496
static DEFINE_IDA(net_generic_ids);

497 498 499 500 501 502 503 504 505 506 507 508 509 510 511
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
E
Eric Dumazet 已提交
512
		max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
513 514
	}
	error = __register_pernet_operations(list, ops);
515 516 517 518 519
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}
520 521 522 523 524 525 526 527

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	
	__unregister_pernet_operations(ops);
528
	rcu_barrier();
529 530 531 532
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566
/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	/* Subsystems go before first_device so they init before devices. */
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
567
 *	used when network namespaces are created or destroyed.  In
568 569 570
 *	addition run the exit method for all existing network
 *	namespaces.
 */
571
void unregister_pernet_subsys(struct pernet_operations *ops)
572 573
{
	mutex_lock(&net_mutex);
574
	unregister_pernet_operations(ops);
575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	/* Remember where the first device entry sits in pernet_list. */
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	/* Keep first_device valid if we were the first device entry. */
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
628 629 630 631

#ifdef CONFIG_NET_NS
/*
 * proc_ns "get" hook: return @task's network namespace with a
 * reference held, or NULL if the task has no nsproxy any more.
 */
static void *netns_get(struct task_struct *task)
{
	struct nsproxy *nsproxy;
	struct net *net = NULL;

	rcu_read_lock();
	nsproxy = task_nsproxy(task);
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	rcu_read_unlock();

	return net;
}

/* proc_ns "put" hook: release the reference taken by netns_get(). */
static void netns_put(void *ns)
{
	put_net(ns);
}

static int netns_install(struct nsproxy *nsproxy, void *ns)
{
651 652
	struct net *net = ns;

653 654
	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !nsown_capable(CAP_SYS_ADMIN))
655 656
		return -EPERM;

657
	put_net(nsproxy->net_ns);
658
	nsproxy->net_ns = get_net(net);
659 660 661
	return 0;
}

662 663 664 665 666 667
/* proc_ns "inum" hook: the /proc inode number identifying @ns. */
static unsigned int netns_inum(void *ns)
{
	struct net *net = ns;

	return net->proc_inum;
}

668 669 670 671 672 673
const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
674
	.inum		= netns_inum,
675 676
};
#endif