#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);
static DEFINE_SPINLOCK(nsid_lock);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

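/* Allocate a generic pointer array sized for the current maximum
 * number of registered pernet ids (max_gen_ptrs).
 */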
static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = max_gen_ptrs;

	return ng;
}

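/* Install a subsystem-private pointer in slot @id of the namespace's
 * generic array, duplicating and RCU-freeing the old array first when
 * @id lies beyond its current length.
 */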
static int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	ng = old_ng;
	if (old_ng->len >= id)
		goto assign;

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an rcu
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, rcu);
assign:
	ng->ptr[id - 1] = data;
	return 0;
}

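/* Apply one pernet_operations to one namespace: allocate and install
 * the per-netns data when ->id and ->size are set, then call ->init.
 */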
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

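/* Undo the allocation that ops_init() made for @ops in @net. */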
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		int id = *ops->id;
		kfree(net_generic(net, id));
	}
}

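/* Invoke the per-namespace and batched exit hooks of @ops for every
 * namespace on @net_exit_list.
 */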
static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
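/* A negative reqid lets idr_alloc() pick any free id; a non-negative
 * reqid restricts the range to [reqid, reqid + 1) so that only this
 * exact id may be returned.
 */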
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, so the caller knows that the new id must be notified via
 * rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	unsigned long flags;
	bool alloc;
	int id;

	spin_lock_irqsave(&nsid_lock, flags);
	alloc = atomic_read(&peer->count) == 0 ? false : true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_irqrestore(&nsid_lock, flags);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}
EXPORT_SYMBOL(peernet2id_alloc);

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&nsid_lock, flags);
	id = __peernet2id(net, peer);
	spin_unlock_irqrestore(&nsid_lock, flags);
	return id;
}

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

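/* Return the peer netns that @net knows under @id, with its refcount
 * bumped, or NULL when the id is not assigned.
 */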
struct net *get_net_ns_by_id(struct net *net, int id)
{
	unsigned long flags;
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	spin_lock_irqsave(&nsid_lock, flags);
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
	spin_unlock_irqrestore(&nsid_lock, flags);
	rcu_read_unlock();

	return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

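/* Drop a "passive" reference to @p.  The underlying struct net is only
 * freed once the last passive reference is gone; the normal refcount
 * (net->count) is handled separately via __put_net().
 */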
void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	net = net_alloc();
	if (!net)
		return ERR_PTR(-ENOMEM);

	get_user_ns(user_ns);

	mutex_lock(&net_mutex);
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

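/* Workqueue handler that destroys, in one batch, every namespace queued
 * on cleanup_list: unhash them from net_namespace_list, retire their
 * nsids, run the pernet exit methods and free the structures once RCU
 * readers are done.
 */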
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id;

			spin_lock_irq(&nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_irq(&nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_irq(&nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_irq(&nsid_lock);

	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
};

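/* RTM_NEWNSID handler: assign the nsid requested by userspace to the
 * peer netns identified by the NETNSA_PID or NETNSA_FD attribute.
 */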
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	unsigned long flags;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	spin_lock_irqsave(&nsid_lock, flags);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_irqrestore(&nsid_lock, flags);
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_irqrestore(&nsid_lock, flags);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

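/* RTM_GETNSID handler: report which nsid, if any, @net has assigned to
 * the peer netns identified by the NETNSA_PID or NETNSA_FD attribute.
 */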
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;

	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

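/* State threaded through idr_for_each() by rtnl_net_dumpid() while
 * dumping all nsids known to one namespace.
 */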
struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};
	unsigned long flags;

	spin_lock_irqsave(&nsid_lock, flags);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_irqrestore(&nsid_lock, flags);

	cb->args[0] = net_cb.idx;
	return skb->len;
}

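/* Notify the RTNLGRP_NSID group that @id was assigned (RTM_NEWNSID) or
 * released (RTM_DELNSID) in @net.
 */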
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

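/* Boot-time initialisation: create the net kmem cache and the netns
 * cleanup workqueue, set up init_net, and register the nsid netlink
 * handlers.
 */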
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);
	list_add(&init_net.exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

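/* Reserve an id in net_generic_ids for @ops when one is requested,
 * then run its init methods; on failure the id is released again.
 */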
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error =  register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

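/* setns(2) callback: switching to @ns requires CAP_SYS_ADMIN over both
 * the target namespace's owning user namespace and the caller's own.
 */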
static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
};
#endif