net_namespace.c 25.5 KB
Newer Older
J
Joe Perches 已提交
1 2
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

3 4 5 6 7 8
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
9
#include <linux/sched.h>
10
#include <linux/idr.h>
11
#include <linux/rculist.h>
12
#include <linux/nsproxy.h>
13 14
#include <linux/fs.h>
#include <linux/proc_ns.h>
15
#include <linux/file.h>
16
#include <linux/export.h>
17
#include <linux/user_namespace.h>
18
#include <linux/net_namespace.h>
19 20
#include <linux/sched/task.h>

21 22
#include <net/sock.h>
#include <net/netlink.h>
23
#include <net/net_namespace.h>
24
#include <net/netns/generic.h>
25 26 27 28 29 30 31

/*
 *	Our network namespace constructor/destructor lists
 */

/* All registered pernet_operations, in registration order. */
static LIST_HEAD(pernet_list);
/* Devices register after subsystems; this marks the boundary in the list. */
static struct list_head *first_device = &pernet_list;
/* Serializes pernet_operations registration and namespace setup/teardown. */
DEFINE_MUTEX(net_mutex);

/* Every live network namespace; modified under RTNL, traversed under RCU. */
LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* The initial network namespace, alive from boot. */
struct net init_net = {
	.count		= REFCOUNT_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

/* Set once setup_net(&init_net, ...) has completed successfully. */
static bool init_net_initialized;

/* First usable generic-array slot; lower slots overlay struct net_generic. */
#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

/* Current length of the net_generic ptr array; only grows, under net_mutex. */
static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
55
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
E
Eric Dumazet 已提交
56 57 58

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
59
		ng->s.len = max_gen_ptrs;
E
Eric Dumazet 已提交
60 61 62 63

	return ng;
}

/* Install @data at slot @id of @net's generic pointer array, RCU-swapping
 * in a larger copy when @id is beyond the current length.  Caller must
 * hold net_mutex.  Returns 0 or -ENOMEM.
 */
static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	if (old_ng->s.len > id) {
		/* Slot exists: a plain store suffices, readers use RCU. */
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside rcu
	 * read section. Besides once set the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}

102 103
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
104 105 106
	int err = -ENOMEM;
	void *data = NULL;

107
	if (ops->id && ops->size) {
108
		data = kzalloc(ops->size, GFP_KERNEL);
109
		if (!data)
110
			goto out;
111 112

		err = net_assign_generic(net, *ops->id, data);
113 114
		if (err)
			goto cleanup;
115
	}
116
	err = 0;
117
	if (ops->init)
118 119 120 121 122 123 124 125 126
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
127 128 129 130 131
}

/* Release the per-net generic data owned by @ops, if it allocated any. */
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (!ops->id || !ops->size)
		return;

	kfree(net_generic(net, *ops->id));
}

/* Run ops->exit() on every netns in the list, then the batched variant. */
static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

/* Free per-net generic data for every netns in the list; must run only
 * after all exit methods have finished (see unregister paths).
 */
static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

158
/* should be called with nsid_lock held */
159 160
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
161
	int min = 0, max = 0;
162 163 164 165 166 167

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

168
	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
169 170 171 172 173 174 175 176 177 178 179 180 181 182 183
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.  On entry *alloc says whether allocation is permitted at all.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		/* GFP_ATOMIC: we hold the caller's spinlock. */
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

210
/* should be called with nsid_lock held */
211 212 213 214 215 216 217 218
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc;
	int id;

	/* A dying netns (count == 0) can no longer hand out nsids. */
	if (refcount_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	/* Only allow allocating a fresh id while the peer is still alive. */
	alloc = refcount_read(&peer->count) == 0 ? false : true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	/* Notify userspace outside the spinlock when a new id was assigned. */
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);
238

/* This function returns, if assigned, the id of a peer netns; otherwise
 * NETNSA_NSID_NOT_ASSIGNED.  It never allocates a new id.
 */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);
250

/* This function returns true if the peer netns has an id assigned into the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

/* Look up the peer netns registered under @id in @net and take a reference.
 * Returns NULL when the id is negative, unknown, or the peer is already
 * dying (maybe_get_net() refuses the reference).
 */
struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	spin_lock_bh(&net->nsid_lock);
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		/* Peer may be mid-teardown; only take a ref if count > 0. */
		peer = maybe_get_net(peer);
	spin_unlock_bh(&net->nsid_lock);
	rcu_read_unlock();

	return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 * Returns 0 on success; on failure the already-initialized pernet ops are
 * unwound in reverse order and the error is returned.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	refcount_set(&net->count, 1);
	refcount_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* All exits ran; now the per-net data can be freed. */
	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	/* Let in-flight RCU callbacks finish before the data goes away. */
	rcu_barrier();
	goto out;
}

/* Per-netns defaults every new namespace starts out with. */
static int __net_init net_defaults_init_net(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	return 0;
}

static struct pernet_operations net_defaults_ops = {
	.init = net_defaults_init_net,
};

static __init int net_defaults_init(void)
{
	if (register_pernet_subsys(&net_defaults_ops))
		panic("Cannot initialize net default settings");

	return 0;
}

core_initcall(net_defaults_init);
338

339
#ifdef CONFIG_NET_NS
/* Charge one netns against the user's UCOUNT_NET_NAMESPACES limit. */
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

/* Undo inc_net_namespaces(). */
static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

/* Slab for struct net, and the workqueue that runs cleanup_net(). */
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

353
static struct net *net_alloc(void)
354
{
355 356 357 358 359 360 361 362
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
363
	if (!net)
364
		goto out_free;
365

366 367 368 369 370 371 372 373 374 375 376
	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

/* Final release of a struct net, once the passive refcount has dropped. */
static void net_free(struct net *net)
{
	/* No readers remain, so plain access to net->gen is fine here. */
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

/* Drop a passive reference; frees the netns when the last one goes away. */
void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && refcount_dec_and_test(&ns->passive))
		net_free(ns);
}

/* clone()/unshare() entry point: return @old_net (with a new reference)
 * unless CLONE_NEWNET was requested, in which case build a fresh namespace
 * owned by @user_ns.  Returns an ERR_PTR on failure.
 */
struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	/* Charge the ucount limit before committing any allocations. */
	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		dec_net_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}

	get_user_ns(user_ns);

	/* Killable: a fatal signal while waiting aborts namespace creation. */
	rv = mutex_lock_killable(&net_mutex);
	if (rv < 0) {
		net_free(net);
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		return ERR_PTR(rv);
	}

	net->ucounts = ucounts;
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		/* Publish the namespace on the global list under RTNL. */
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}
434

/* Dead namespaces queued by __put_net() for cleanup_net() to tear down. */
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

/* Workqueue handler that tears down every namespace queued on cleanup_list:
 * unlink from the global list, retire their nsids everywhere, run all
 * pernet exit methods in reverse registration order, then free.
 */
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		/* Drop this netns's nsid from every other namespace and
		 * notify their RTNLGRP_NSID listeners.
		 */
		for_each_net(tmp) {
			int id;

			spin_lock_bh(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_bh(&tmp->nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_bh(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_bh(&net->nsid_lock);

	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 * (The lock/unlock pair simply waits out any cleanup_net() holding
 * net_mutex.)
 */
void net_ns_barrier(void)
{
	mutex_lock(&net_mutex);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL(net_ns_barrier);

static DECLARE_WORK(net_cleanup_work, cleanup_net);

/* Last full reference dropped: queue the namespace for asynchronous
 * teardown by cleanup_net().
 */
void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

/* Resolve an open nsfs file descriptor to its struct net and take a
 * reference.  Returns ERR_PTR(-EINVAL) if the fd is not a netns file.
 */
struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	/* Only accept net-namespace files, not other ns types. */
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

557
#else
/* Without CONFIG_NET_NS there are no netns fds to resolve. */
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
564

/* Resolve @pid (in the caller's pid namespace) to its net namespace and
 * take a reference.  Returns ERR_PTR(-ESRCH) when the task does not exist
 * or is already exiting (no nsproxy).
 */
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

/* Pernet init: give each namespace its nsfs identity and inode number. */
static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

/* Pernet exit: release the nsfs inode number taken in net_ns_net_init(). */
static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

S
stephen hemminger 已提交
605
/* Netlink attribute policy for RTM_NEWNSID / RTM_GETNSID messages. */
static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
};

/* RTM_NEWNSID handler: assign a (possibly caller-chosen) nsid to a peer
 * netns identified by NETNSA_PID or NETNSA_FD.
 */
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	/* The peer can be referenced by pid or by an nsfs fd. */
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		/* idr_alloc() reports the requested slot is taken. */
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

/* Payload size of an nsid message: rtgenmsg header plus one s32 attr. */
static int rtnl_net_get_size(void)
{
	int size = NLMSG_ALIGN(sizeof(struct rtgenmsg));

	size += nla_total_size(sizeof(s32));	/* NETNSA_NSID */
	return size;
}

/* Build one RTM_*NSID message (rtgenmsg + NETNSA_NSID) into @skb.
 * Returns 0 or -EMSGSIZE when the skb has no room.
 */
static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	/* Roll the partially-built message back out of the skb. */
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* RTM_GETNSID handler: report the nsid of a peer netns identified by
 * NETNSA_PID or NETNSA_FD back to the requester.
 */
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	/* May be NETNSA_NSID_NOT_ASSIGNED; that value is reported as-is. */
	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

N
Nicolas Dichtel 已提交
756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773
/* State threaded through rtnl_net_dumpid_one() by idr_for_each(). */
struct rtnl_net_dump_cb {
	struct net *net;	/* namespace whose nsid idr is dumped */
	struct sk_buff *skb;	/* message under construction */
	struct netlink_callback *cb;
	int idx;		/* current position in the idr walk */
	int s_idx;		/* resume point from the previous dump pass */
};

/* idr_for_each() callback: emit one RTM_NEWNSID entry per registered peer,
 * skipping entries already sent in an earlier dump pass.
 */
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

/* RTM_GETNSID dump: walk the nsid idr and emit one message per peer. */
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	/* Remember where to resume on the next dump invocation. */
	cb->args[0] = net_cb.idx;
	return skb->len;
}

/* Broadcast an RTM_NEWNSID/RTM_DELNSID notification to RTNLGRP_NSID. */
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	/* Record the failure so listeners can detect the lost event. */
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

/* Boot-time initialization: create the struct net slab and cleanup
 * workqueue, set up init_net, and register the nsid rtnetlink handlers.
 */
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      RTNL_FLAG_DOIT_UNLOCKED);

	return 0;
}

pure_initcall(net_ns_init);

869
#ifdef CONFIG_NET_NS
/* Queue @ops on @list and run its init on every existing namespace.  On
 * failure, unwind by running exit/free on the namespaces already done.
 */
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

/* Remove @ops from the list and run its exit/free on every namespace. */
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

908 909
#else

/* !CONFIG_NET_NS: only init_net exists.  Before it is set up, merely queue
 * the ops; afterwards, run init on init_net directly.
 */
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);
		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */
934

935 936
/* Allocator for pernet generic-array ids. */
static DEFINE_IDA(net_generic_ids);

/* Common registration path: reserve a generic id (when requested), grow
 * the recorded array length, then register against all namespaces.
 */
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id);
		if (error < 0) {
			/* ida_get_new_above() wants preloading on -EAGAIN. */
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		/* Wait out RCU readers before handing the id back. */
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	
	__unregister_pernet_operations(ops);
968
	rcu_barrier();
969 970 971 972
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006
/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error =  register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		/* First device registered: remember where devices begin. */
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	/* Keep the device-list boundary valid if we remove its anchor. */
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
1068 1069

#ifdef CONFIG_NET_NS
/* nsfs hook: take a reference on @task's net namespace, if it has one. */
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

/* Map a generic ns_common back to its enclosing struct net. */
static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

/* nsfs hook: drop the reference taken by netns_get(). */
static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

/* setns(2) hook: switch @nsproxy over to the given net namespace.
 * Requires CAP_SYS_ADMIN over both the target netns's owning userns and
 * the caller's own user namespace.
 */
static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	/* Swap references: drop the old namespace, pin the new one. */
	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

/* nsfs hook: report the user namespace that owns this netns. */
static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

/* proc/nsfs operations backing /proc/<pid>/ns/net. */
const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif