#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <linux/sched/task.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.count		= REFCOUNT_INIT(1),
	.dev_base_head	= LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;
static unsigned int nr_sync_pernet_ops;
/*
 * net_sem: protects: pernet_list, net_generic_ids, nr_sync_pernet_ops,
 * init_net_initialized and first_device pointer.
 */
DECLARE_RWSEM(net_sem);
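/*
 * Locking sketch (a summary of the code below, not new rules): paths
 * that set up or tear down a namespace (copy_net_ns(), cleanup_net())
 * take net_sem for reading when every registered pernet operation is
 * async (nr_sync_pernet_ops == 0) and for writing otherwise, while
 * registration and unregistration of pernet operations always take it
 * for writing.
 */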

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))
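/*
 * A worked example (illustrative only): on a typical 64-bit build the
 * net_generic header is an unsigned int length plus a struct rcu_head,
 * i.e. the "+1 for len +2 for rcu_head" pointer slots noted below, so
 * (24 + 8 - 1) / 8 == 3 and the first usable pernet ops id is 3.  The
 * exact value depends on the architecture and struct layout.
 */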

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_sem));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an rcu
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		kfree(net_generic(net, *ops->id));
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, so the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc = false, alive = false;
	int id;

	if (refcount_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	/*
	 * When peer is obtained from RCU lists, we may race with
	 * its cleanup. Check whether it's alive, which guarantees
	 * we never hash a peer back to net->netns_ids after it has
	 * just been idr_remove()'d from there in cleanup_net().
	 */
	if (maybe_get_net(peer))
		alive = alloc = true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	if (alive)
		put_net(peer);
	return id;
}
EXPORT_SYMBOL_GPL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned into the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		peer = maybe_get_net(peer);
	rcu_read_unlock();

	return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_sem held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	refcount_set(&net->count, 1);
	refcount_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
	rtnl_lock();
	list_add_tail_rcu(&net->list, &net_namespace_list);
	rtnl_unlock();
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

static int __net_init net_defaults_init_net(struct net *net)
{
	net->core.sysctl_somaxconn = SOMAXCONN;
	return 0;
}

static struct pernet_operations net_defaults_ops = {
	.init = net_defaults_init_net,
	.async = true,
};

static __init int net_defaults_init(void)
{
	if (register_pernet_subsys(&net_defaults_ops))
		panic("Cannot initialize net default settings");

	return 0;
}

core_initcall(net_defaults_init);

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && refcount_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	unsigned int write;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		rv = -ENOMEM;
		goto dec_ucounts;
	}
	refcount_set(&net->passive, 1);
	net->ucounts = ucounts;
	get_user_ns(user_ns);
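	/*
	 * When all registered pernet operations are async, several
	 * namespaces may be set up concurrently under the read lock;
	 * otherwise the rwsem must be taken exclusively.  Re-check
	 * nr_sync_pernet_ops after down_read(), since a sync operation
	 * may have been registered while we waited for the lock.
	 */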
again:
	write = READ_ONCE(nr_sync_pernet_ops);
	if (write)
		rv = down_write_killable(&net_sem);
	else
		rv = down_read_killable(&net_sem);
	if (rv < 0)
		goto put_userns;

	if (!write && unlikely(READ_ONCE(nr_sync_pernet_ops))) {
		up_read(&net_sem);
		goto again;
	}
	rv = setup_net(net, user_ns);

	if (write)
		up_write(&net_sem);
	else
		up_read(&net_sem);

	if (rv < 0) {
put_userns:
		put_user_ns(user_ns);
		net_drop_ns(net);
dec_ucounts:
		dec_net_namespaces(ucounts);
		return ERR_PTR(rv);
	}
	return net;
}

static void unhash_nsid(struct net *net, struct net *last)
{
	struct net *tmp;
	/* This function is only called from cleanup_net() work,
	 * and this work is the only process that may delete
	 * a net from net_namespace_list. So, while the code below
	 * is executing, the list may only grow. Thus, we do not
	 * use for_each_net_rcu() or rtnl_lock().
	 */
	for_each_net(tmp) {
		int id;

		spin_lock_bh(&tmp->nsid_lock);
		id = __peernet2id(tmp, net);
		if (id >= 0)
			idr_remove(&tmp->netns_ids, id);
		spin_unlock_bh(&tmp->nsid_lock);
		if (id >= 0)
			rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		if (tmp == last)
			break;
	}
	spin_lock_bh(&net->nsid_lock);
	idr_destroy(&net->netns_ids);
	spin_unlock_bh(&net->nsid_lock);
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp, *last;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);
	unsigned int write;

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);
again:
	write = READ_ONCE(nr_sync_pernet_ops);
	if (write)
		down_write(&net_sem);
	else
		down_read(&net_sem);

	if (!write && unlikely(READ_ONCE(nr_sync_pernet_ops))) {
		up_read(&net_sem);
		goto again;
	}

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache the last net. After we unlock rtnl, no new net
	 * added to net_namespace_list can assign an nsid to a net
	 * from net_kill_list (see peernet2id_alloc()). So, we skip
	 * them in unhash_nsid().
	 *
	 * Note that unhash_nsid() does not delete nsid links
	 * between net_kill_list's nets, as they've already been
	 * deleted from net_namespace_list. But this would be
	 * useless anyway, as netns_ids are destroyed there.
	 */
	last = list_last_entry(&net_namespace_list, struct net, list);
	rtnl_unlock();

	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	if (write)
		up_write(&net_sem);
	else
		up_read(&net_sem);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}

/**
 * net_ns_barrier - wait until concurrent net_cleanup_work is done
 *
 * cleanup_net runs from work queue and will first remove namespaces
 * from the global list, then run net exit functions.
 *
 * Call this in module exit path to make sure that all netns
 * ->exit ops have been invoked before the function is removed.
 */
void net_ns_barrier(void)
{
	down_write(&net_sem);
	up_write(&net_sem);
}
EXPORT_SYMBOL(net_ns_barrier);
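
/*
 * Illustrative (hypothetical) use in a module's exit path, before the
 * module's pernet ->exit handlers become unreachable:
 *
 *	net_ns_barrier();
 *
 * After the barrier returns, any cleanup_net() work that was already
 * in flight has finished running its exit methods.
 */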

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
	.async = true,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
};
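
/*
 * These attributes arrive over rtnetlink; e.g. (assuming an iproute2
 * build with nsid support) "ip netns set NAME NSID" sends RTM_NEWNSID
 * with NETNSA_FD plus NETNSA_NSID, and "ip netns list-id" walks the
 * RTM_GETNSID dump path below.
 */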

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID]) {
		NL_SET_ERR_MSG(extack, "nsid is missing");
		return -EINVAL;
	}
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}
	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack,
			       "Peer netns already has a nsid assigned");
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	} else if (err == -ENOSPC && nsid >= 0) {
		err = -EEXIST;
		NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]);
		NL_SET_ERR_MSG(extack, "The specified nsid is already used");
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct nlattr *nla;
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy, extack);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID]) {
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
		nla = tb[NETNSA_PID];
	} else if (tb[NETNSA_FD]) {
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
		nla = tb[NETNSA_FD];
	} else {
		NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
		return -EINVAL;
	}

	if (IS_ERR(peer)) {
		NL_SET_BAD_ATTR(extack, nla);
		NL_SET_ERR_MSG(extack, "Peer netns reference is invalid");
		return PTR_ERR(peer);
	}

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	down_write(&net_sem);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;
	up_write(&net_sem);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      RTNL_FLAG_DOIT_UNLOCKED);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error, clean up all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);
		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	} else if (!ops->async) {
		pr_info_once("Pernet operations %ps are sync.\n", ops);
		nr_sync_pernet_ops++;
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!ops->async)
		BUG_ON(nr_sync_pernet_ops-- == 0);
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	down_write(&net_sem);
	error = register_pernet_operations(first_device, ops);
	up_write(&net_sem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
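
/*
 * Minimal usage sketch (hypothetical "foo" subsystem; the names and the
 * per-net structure are illustrative, not part of this file):
 *
 *	static unsigned int foo_net_id __read_mostly;
 *
 *	struct foo_net {
 *		int counter;
 *	};
 *
 *	static int __net_init foo_init_net(struct net *net)
 *	{
 *		struct foo_net *fn = net_generic(net, foo_net_id);
 *
 *		fn->counter = 0;
 *		return 0;
 *	}
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init  = foo_init_net,
 *		.id    = &foo_net_id,
 *		.size  = sizeof(struct foo_net),
 *		.async = true,
 *	};
 *
 * registered from module/initcall context with
 * register_pernet_subsys(&foo_net_ops).
 */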

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	down_write(&net_sem);
	unregister_pernet_operations(ops);
	up_write(&net_sem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	down_write(&net_sem);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	up_write(&net_sem);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	down_write(&net_sem);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	up_write(&net_sem);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif