/* net/core/net_namespace.c — network namespace creation, teardown and
 * nsid management.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

3 4 5 6 7 8
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
9
#include <linux/sched.h>
10
#include <linux/idr.h>
11
#include <linux/rculist.h>
12
#include <linux/nsproxy.h>
13 14
#include <linux/fs.h>
#include <linux/proc_ns.h>
15
#include <linux/file.h>
16
#include <linux/export.h>
17
#include <linux/user_namespace.h>
18
#include <linux/net_namespace.h>
19 20
#include <linux/sched/task.h>

21 22
#include <net/sock.h>
#include <net/netlink.h>
23
#include <net/net_namespace.h>
24
#include <net/netns/generic.h>
/*
 *	Our network namespace constructor/destructor lists
 */

/* All registered pernet_operations.  "subsys" ops are inserted before
 * first_device, "device" ops after it, so subsystems initialize before
 * devices (and tear down after them).
 */
static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;

/* Serializes pernet (un)registration, setup_net() and cleanup_net(). */
DEFINE_MUTEX(net_mutex);

/* Every live network namespace; walked under RCU or the rtnl lock. */
LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* The initial network namespace, live from boot with one reference. */
struct net init_net = {
	.count		= ATOMIC_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

/* Set once setup_net(&init_net, ...) has run (see net_ns_init()). */
static bool init_net_initialized;

/* First usable slot index in a net_generic array: the slots below this
 * overlay the struct net_generic header itself.
 */
#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

/* Highest generic-array slot id handed out so far, plus one; sizes any
 * newly allocated net_generic array.
 */
static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

/* Allocate a zeroed net_generic array with room for max_gen_ptrs
 * pointers.  Returns NULL on allocation failure.
 */
static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}

/* Publish @data in slot @id of @net's generic pointer array, growing the
 * array (by RCU-swapping in a larger copy) when @id does not fit.
 * Must be called with net_mutex held.  Returns 0 or -ENOMEM.
 */
static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	if (old_ng->s.len > id) {
		/* Slot already exists: a plain store is enough, see below. */
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside rcu
	 * read section. Besides once set the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}

/* Run one pernet_operations' initialisation for @net: allocate the
 * per-net private data (when ops->id/ops->size are set), publish it via
 * net_assign_generic(), then invoke ops->init.  On ops->init failure the
 * just-allocated data is freed again.  Returns 0 or a negative errno.
 */
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

/* Release the per-net private data that ops_init() allocated for @ops. */
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (!ops->id || !ops->size)
		return;

	kfree(net_generic(net, *ops->id));
}

/* Invoke ops->exit for each namespace on @net_exit_list individually,
 * then the batched ops->exit_batch once for the whole list.
 */
static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

/* Free @ops' per-net data for every namespace on @net_exit_list. */
static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (!ops->size || !ops->id)
		return;

	list_for_each_entry(net, net_exit_list, exit_list)
		ops_free(ops, net);
}

158
/* should be called with nsid_lock held */
159 160
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
161
	int min = 0, max = 0;
162 163 164 165 166 167

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

168
	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
169 170 171 172 173 174 175 176 177 178 179 180 181 182 183
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * returns the id 0 (idr_for_each() will not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (!net_eq(net, peer))
		return 0;

	return id ? id : NET_ID_ZERO;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		/* Report back that a fresh id was allocated. */
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

210
/* should be called with nsid_lock held */
211 212 213 214 215 216 217 218
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc;
	int id;

	/* @net is already dead: refuse to hand out ids from it. */
	if (atomic_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	/* Only allocate a fresh id while the peer is still alive. */
	alloc = atomic_read(&peer->count) == 0 ? false : true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}

238
/* This function returns, if assigned, the id of a peer netns. */
239
int peernet2id(struct net *net, struct net *peer)
240 241 242
{
	int id;

243
	spin_lock_bh(&net->nsid_lock);
244
	id = __peernet2id(net, peer);
245
	spin_unlock_bh(&net->nsid_lock);
246 247
	return id;
}
248
EXPORT_SYMBOL(peernet2id);
249

250 251 252 253 254 255 256 257
/* This function returns true is the peer netns has an id assigned into the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

258 259 260 261 262 263 264 265
struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
266
	spin_lock_bh(&net->nsid_lock);
267 268 269
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
270
	spin_unlock_bh(&net->nsid_lock);
271 272 273 274 275
	rcu_read_unlock();

	return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);	/* the caller's reference */
	atomic_set(&net->passive, 1);	/* dropped via net_drop_ns() */
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	/* Run every registered pernet init, in registration order. */
	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	/* Let pending RCU callbacks finish before the per-net data goes. */
	rcu_barrier();
	goto out;
}

/* Apply default per-net sysctl values to every new namespace. */
static int __net_init net_defaults_init_net(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	return 0;
}

static struct pernet_operations net_defaults_ops = {
	.init = net_defaults_init_net,
};

static __init int net_defaults_init(void)
{
	if (register_pernet_subsys(&net_defaults_ops))
		panic("Cannot initialize net default settings");

	return 0;
}

core_initcall(net_defaults_init);
337

#ifdef CONFIG_NET_NS
/* Charge one net namespace against the user namespace's
 * UCOUNT_NET_NAMESPACES limit; returns NULL when the limit is reached.
 */
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

/* Undo inc_net_namespaces(). */
static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;	/* slab cache for struct net */
static struct workqueue_struct *netns_wq;	/* runs cleanup_net() */

/* Allocate a zeroed struct net together with its initial net_generic
 * array.  Returns NULL on failure.
 */
static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

/* Final counterpart of net_alloc().  Callers (net_drop_ns() paths) have
 * already waited out RCU readers via rcu_barrier(), so a plain kfree of
 * the generic array is safe here.
 */
static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

380 381 382 383 384 385 386
void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}

/* Create a new network namespace for CLONE_NEWNET, or just take another
 * reference on @old_net when the flag is absent.  Returns the namespace
 * or an ERR_PTR on failure.
 */
struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	/* Charge the new namespace against the user's ucount limit. */
	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		dec_net_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}

	get_user_ns(user_ns);

	rv = mutex_lock_killable(&net_mutex);
	if (rv < 0) {
		/* Nothing was published yet: plain net_free() is enough. */
		net_free(net);
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		return ERR_PTR(rv);
	}

	net->ucounts = ucounts;
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		/* Make the namespace visible to for_each_net() walkers. */
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		/* setup_net() failed: drop the passive ref to free it. */
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}
433

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

/* Workqueue handler tearing down every namespace that __put_net() has
 * queued on cleanup_list since the last run.
 */
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		/* Drop every nsid other namespaces assigned to this one,
		 * notifying RTM_DELNSID listeners for each removal.
		 */
		for_each_net(tmp) {
			int id;

			spin_lock_bh(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_bh(&tmp->nsid_lock);
			/* Notify outside the spinlock (GFP_KERNEL alloc). */
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_bh(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_bh(&net->nsid_lock);

	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

/* Called when the last get_net() reference drops.  Teardown is deferred
 * to the netns workqueue because callers may be in atomic context.
 */
void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

/* Resolve an open /proc/<pid>/ns/net file descriptor to its struct net,
 * taking a reference.  Returns ERR_PTR(-EINVAL) if the fd does not refer
 * to a netns file, or propagates the error from proc_ns_fget().
 */
struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
/* !CONFIG_NET_NS: only init_net exists, so no fd can name a netns. */
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
546

/* Resolve @pid (in the caller's pid namespace) to that task's network
 * namespace, taking a reference.  Returns ERR_PTR(-ESRCH) when the task
 * does not exist or its nsproxy is already gone (task exiting).
 */
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		/* task_lock stabilizes tsk->nsproxy against exit. */
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

/* Allocate the proc inode number backing /proc/<pid>/ns/net for @net. */
static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

/* Release the proc inode number taken in net_ns_net_init(). */
static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

/* Netlink attribute policy for RTM_NEWNSID / RTM_GETNSID requests. */
static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
};

/* RTM_NEWNSID handler: assign the nsid given in NETNSA_NSID to the peer
 * namespace identified by NETNSA_PID or NETNSA_FD.  Fails with -EEXIST
 * when the peer already has an id in this namespace.
 */
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		/* Broadcast the new mapping to RTNLGRP_NSID listeners. */
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	}
out:
	put_net(peer);
	return err;
}

/* Payload size needed for one nsid netlink message. */
static int rtnl_net_get_size(void)
{
	int size = NLMSG_ALIGN(sizeof(struct rtgenmsg));

	size += nla_total_size(sizeof(s32));	/* NETNSA_NSID */

	return size;
}

/* Build one nsid message (rtgenmsg header + NETNSA_NSID attribute) into
 * @skb.  Returns 0, or -EMSGSIZE when @skb runs out of tailroom.
 */
static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* RTM_GETNSID handler: report the nsid (if any) of the peer namespace
 * identified by NETNSA_PID or NETNSA_FD via a unicast RTM_NEWNSID reply.
 */
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;

	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	/* May be NETNSA_NSID_NOT_ASSIGNED; reported to userspace as-is. */
	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

/* Cursor state threaded through idr_for_each() during an nsid dump. */
struct rtnl_net_dump_cb {
	struct net *net;	/* namespace whose ids are being dumped */
	struct sk_buff *skb;	/* dump destination */
	struct netlink_callback *cb;
	int idx;		/* index of the current idr entry */
	int s_idx;		/* resume point from the previous dump pass */
};

/* idr_for_each() callback: emit one RTM_NEWNSID record per nsid entry. */
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	/* Skip entries already emitted in an earlier dump pass. */
	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

/* RTM_GETNSID dump handler: emit one RTM_NEWNSID message per registered
 * nsid, resuming from cb->args[0] across successive invocations.
 */
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	/* Remember where to resume if the dump did not fit. */
	cb->args[0] = net_cb.idx;
	return skb->len;
}

/* Multicast an RTM_NEWNSID/RTM_DELNSID notification for (@net, @id) to
 * the RTNLGRP_NSID group; failures are recorded on the group socket.
 */
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

/* Boot-time initialisation: create the struct net slab cache and the
 * cleanup workqueue, set up init_net, and register the nsid rtnetlink
 * handlers.  Any failure here panics — networking cannot proceed.
 */
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);

825
#ifdef CONFIG_NET_NS
/* Add @ops to @list and run its init on every existing namespace; on any
 * failure undo the inits already performed.  Caller holds net_mutex.
 */
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

/* Remove @ops and run its exit/free on every live namespace.
 * Caller holds net_mutex.
 */
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

/* !CONFIG_NET_NS: only init_net exists.  Before it is set up just queue
 * the ops on the list; afterwards run the init on init_net right away.
 */
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

/* !CONFIG_NET_NS counterpart of the above: tear down on init_net only. */
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);
		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */
890

/* Allocator for pernet generic-array slot ids (*ops->id). */
static DEFINE_IDA(net_generic_ids);

/* Reserve a generic-array id (when ops->id is set) and register @ops on
 * @list, rolling both back on failure.  Caller holds net_mutex.
 */
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		/* Ids below MIN_PERNET_OPS_ID overlay the array header. */
		error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				/* Preload the IDA and retry the allocation. */
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		/* Wait out RCU users of the per-net data before reuse. */
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	
	__unregister_pernet_operations(ops);
924
	rcu_barrier();
925 926 927 928
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	/* Subsystems go before first_device so they init before devices. */
	error =  register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int err;

	mutex_lock(&net_mutex);
	err = register_pernet_operations(&pernet_list, ops);
	/* Remember the first device ops so that later subsystems are
	 * inserted ahead of all devices.
	 */
	if (!err && first_device == &pernet_list)
		first_device = &ops->list;
	mutex_unlock(&net_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	/* If this was the first device, the next entry becomes it. */
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
1024 1025

#ifdef CONFIG_NET_NS
/* proc_ns .get: grab a reference on @task's network namespace, or return
 * NULL when the task's nsproxy is already gone (task exiting).
 */
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

/* Map a generic ns_common back to its containing struct net. */
static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

/* proc_ns .put: drop the reference taken by netns_get(). */
static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

/* proc_ns .install (setns): switch @nsproxy over to @ns.  Requires
 * CAP_SYS_ADMIN both over the target netns' user namespace and in the
 * caller's own user namespace.
 */
static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

/* proc_ns .owner: user namespace that owns this netns. */
static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif