net_namespace.c 22.9 KB
Newer Older
J
Joe Perches 已提交
1 2
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

3 4 5 6 7 8
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
9
#include <linux/sched.h>
10
#include <linux/idr.h>
11
#include <linux/rculist.h>
12
#include <linux/nsproxy.h>
13 14
#include <linux/fs.h>
#include <linux/proc_ns.h>
15
#include <linux/file.h>
16
#include <linux/export.h>
17
#include <linux/user_namespace.h>
18 19 20
#include <linux/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
21
#include <net/net_namespace.h>
22
#include <net/netns/generic.h>
23 24 25 26 27 28 29

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
30
DEFINE_MUTEX(net_mutex);
31 32

LIST_HEAD(net_namespace_list);
A
Alexey Dobriyan 已提交
33
EXPORT_SYMBOL_GPL(net_namespace_list);
34

35 36 37
struct net init_net = {
	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
38
EXPORT_SYMBOL(init_net);
39

40 41
static bool init_net_initialized;

42 43
#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

E
Eric Dumazet 已提交
44 45 46 47 48 49 50 51 52 53 54 55 56 57
static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

/* Allocate a zeroed net_generic array sized for the current maximum of
 * registered generic-pernet ids; unused slots therefore read as NULL.
 */
static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	size_t sz = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(sz, GFP_KERNEL);
	if (!ng)
		return NULL;

	ng->len = max_gen_ptrs;
	return ng;
}

58 59 60 61 62 63 64
/* Install @data in slot @id of @net's generic pointer array, growing the
 * array (RCU-safely) when @id is beyond its current length.  Slot ids are
 * 1-based: id maps to ptr[id - 1].  Caller must hold net_mutex.
 */
static int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	ng = old_ng;
	/* Array already long enough: store in place. */
	if (old_ng->len >= id)
		goto assign;

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside rcu
	 * read section. Besides once set the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, rcu);
assign:
	ng->ptr[id - 1] = data;
	return 0;
}

95 96
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
97 98 99
	int err = -ENOMEM;
	void *data = NULL;

100
	if (ops->id && ops->size) {
101
		data = kzalloc(ops->size, GFP_KERNEL);
102
		if (!data)
103
			goto out;
104 105

		err = net_assign_generic(net, *ops->id, data);
106 107
		if (err)
			goto cleanup;
108
	}
109
	err = 0;
110
	if (ops->init)
111 112 113 114 115 116 117 118 119
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
120 121 122 123 124 125 126 127 128 129
}

/* Free the generic-pernet data this ops registered for @net, if any. */
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (!ops->id || !ops->size)
		return;

	kfree(net_generic(net, *ops->id));
}

130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151
/* Invoke @ops's exit hooks for every namespace on @net_exit_list:
 * per-net ->exit first, then the batched ->exit_batch.
 */
static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *cur;

	if (ops->exit)
		list_for_each_entry(cur, net_exit_list, exit_list)
			ops->exit(cur);

	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

/* Free @ops's generic-pernet data for every namespace on @net_exit_list. */
static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *cur;

	if (!ops->size || !ops->id)
		return;

	list_for_each_entry(cur, net_exit_list, exit_list)
		ops_free(ops, cur);
}

152
/* should be called with nsid_lock held */
153 154
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
155
	int min = 0, max = 0;
156 157 158 159 160 161

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

162
	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
163 164 165 166 167 168 169 170 171 172 173 174 175 176 177
}

/* idr_for_each() callback used to locate @peer in a nsid idr.
 * idr_for_each() stops on the first non-zero return value, so a match
 * at id 0 cannot be reported directly; it is encoded as the magic
 * value NET_ID_ZERO (-1) instead.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (!net_eq(net, peer))
		return 0;

	return id ? id : NET_ID_ZERO;
}

178 179 180 181
/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnl.
 */
182
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
183 184
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
185
	bool alloc_it = *alloc;
186

187 188
	*alloc = false;

189 190 191 192 193 194
	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

195
	if (alloc_it) {
196
		id = alloc_netid(net, peer, -1);
197
		*alloc = true;
198 199
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}
200

201
	return NETNSA_NSID_NOT_ASSIGNED;
202 203
}

204
/* should be called with nsid_lock held */
205 206 207 208 209 210 211 212
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
213 214 215
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	unsigned long flags;
	bool alloc;
	int id;

	spin_lock_irqsave(&net->nsid_lock, flags);
	/* Never allocate a new id for a peer whose refcount already hit
	 * zero (it is being torn down); only look up an existing one.
	 */
	alloc = atomic_read(&peer->count) == 0 ? false : true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_irqrestore(&net->nsid_lock, flags);
	/* Notify outside the nsid_lock (rtnl_net_notifyid may sleep). */
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}
EXPORT_SYMBOL(peernet2id_alloc);
231

232
/* This function returns, if assigned, the id of a peer netns.
 * Returns NETNSA_NSID_NOT_ASSIGNED (negative) when no id is assigned.
 */
int peernet2id(struct net *net, struct net *peer)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&net->nsid_lock, flags);
	id = __peernet2id(net, peer);
	spin_unlock_irqrestore(&net->nsid_lock, flags);
	return id;
}

244 245 246 247 248 249 250 251
/* Returns true if the peer netns has an id assigned in the current netns. */
bool peernet_has_id(struct net *net, struct net *peer)
{
	int id = peernet2id(net, peer);

	return id >= 0;
}

252 253
struct net *get_net_ns_by_id(struct net *net, int id)
{
254
	unsigned long flags;
255 256 257 258 259 260
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
W
WANG Cong 已提交
261
	spin_lock_irqsave(&net->nsid_lock, flags);
262 263 264
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
W
WANG Cong 已提交
265
	spin_unlock_irqrestore(&net->nsid_lock, flags);
266 267 268 269 270
	rcu_read_unlock();

	return peer;
}

271 272 273
/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);	/* the caller's reference */
	atomic_set(&net->passive, 1);	/* dropped by net_drop_ns() */
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	/* Run every registered pernet init in registration order. */
	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	/* Wait out any RCU callbacks queued by the exit hooks before
	 * the partially constructed namespace is freed by the caller.
	 */
	rcu_barrier();
	goto out;
}

313

314 315 316 317
#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

318
static struct net *net_alloc(void)
319
{
320 321 322 323 324 325 326 327
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
328
	if (!net)
329
		goto out_free;
330

331 332 333 334 335 336 337 338 339 340 341
	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

/* Final teardown of a namespace: no RCU readers remain at this point,
 * so the generic array can be freed directly (rcu_access_pointer()
 * merely strips the __rcu annotation).
 */
static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

346 347 348 349 350 351 352
/* Drop a passive reference; frees the namespace when the last one goes. */
void net_drop_ns(void *p)
{
	struct net *net = p;

	if (!net)
		return;

	if (atomic_dec_and_test(&net->passive))
		net_free(net);
}

353 354
/* Create (or share) a network namespace for a clone/unshare request.
 * Without CLONE_NEWNET the old namespace is reused with an extra ref;
 * otherwise a new namespace is allocated, initialized under net_mutex
 * and published on net_namespace_list.  Returns ERR_PTR() on failure.
 */
struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	net = net_alloc();
	if (!net)
		return ERR_PTR(-ENOMEM);

	/* The namespace keeps a reference on its owning user namespace. */
	get_user_ns(user_ns);

	mutex_lock(&net_mutex);
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		/* Publish only fully initialized namespaces. */
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}
383

384 385 386
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

/* Workqueue handler tearing down every namespace queued on cleanup_list. */
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		/* Drop the nsids other namespaces hold for the dying net,
		 * notifying RTM_DELNSID outside each nsid_lock.
		 */
		for_each_net(tmp) {
			int id;

			spin_lock_irq(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_irq(&tmp->nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_irq(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_irq(&net->nsid_lock);

	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);
454 455 456 457

/* Called when the last get_net() reference is dropped: defer the actual
 * teardown to the netns workqueue (process context, may sleep there).
 */
void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

468 469 470
/* Resolve a /proc/<pid>/ns/net file descriptor to its struct net,
 * taking a reference.  Returns ERR_PTR(-EINVAL) for non-netns fds,
 * or the error from proc_ns_fget() for bad descriptors.
 */
struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	/* Any nsfs inode resolves here; accept only network namespaces. */
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

488
#else
489 490 491 492
struct net *get_net_ns_by_fd(int fd)
{
	/* !CONFIG_NET_NS: there are no netns fds to resolve */
	return ERR_PTR(-EINVAL);
}
493
#endif
494
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);
495

496 497 498 499 500 501 502 503 504 505 506
/* Look up a task by pid (interpreted in the caller's pid namespace) and
 * return a reference to its network namespace, or ERR_PTR(-ESRCH) when
 * the task does not exist or is already past nsproxy teardown.
 */
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		/* task_lock stabilizes tsk->nsproxy against exit. */
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

518 519
/* Per-namespace init: allocate the proc inode number that identifies
 * this netns (the st_ino of /proc/<pid>/ns/net).
 */
static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

/* Per-namespace exit: release the proc inode number again. */
static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

S
stephen hemminger 已提交
536
/* Netlink attribute policy shared by the RTM_*NSID handlers. */
static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
};

/* RTM_NEWNSID handler: assign the requested nsid to the peer netns
 * identified by NETNSA_PID or NETNSA_FD.  Returns -EEXIST when the peer
 * already has an id in this namespace.
 */
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	unsigned long flags;
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	spin_lock_irqsave(&net->nsid_lock, flags);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_irqrestore(&net->nsid_lock, flags);
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_irqrestore(&net->nsid_lock, flags);
	if (err >= 0) {
		/* alloc_netid() returned the new id; notify outside the lock. */
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	}
out:
	put_net(peer);
	return err;
}

/* Payload size of an RTM_NEWNSID / RTM_DELNSID message. */
static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))	/* message header */
	       + nla_total_size(sizeof(s32));		/* NETNSA_NSID */
}

/* Fill one nsid netlink message: rtgenmsg header plus the NETNSA_NSID
 * attribute.  Returns 0 or -EMSGSIZE when @skb has no room left.
 */
static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* RTM_GETNSID handler: report the nsid of the peer netns identified by
 * NETNSA_PID or NETNSA_FD back to the requesting socket.
 */
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;

	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	/* May be NETNSA_NSID_NOT_ASSIGNED; that value is reported as-is. */
	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

N
Nicolas Dichtel 已提交
661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678
/* State shared between rtnl_net_dumpid() and its idr_for_each() callback. */
struct rtnl_net_dump_cb {
	struct net *net;		/* namespace whose nsid table is dumped */
	struct sk_buff *skb;		/* dump output buffer */
	struct netlink_callback *cb;
	int idx;			/* current entry index */
	int s_idx;			/* index to resume from (cb->args[0]) */
};

/* idr_for_each() callback: emit one nsid entry, skipping entries already
 * sent by a previous (interrupted) dump pass.
 */
static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

/* RTM_GETNSID dump handler: walk the nsid idr and emit one message per
 * registered peer, resuming from cb->args[0] across invocations.
 */
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};
	unsigned long flags;

	spin_lock_irqsave(&net->nsid_lock, flags);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_irqrestore(&net->nsid_lock, flags);

	/* Remember where to resume on the next dump invocation. */
	cb->args[0] = net_cb.idx;
	return skb->len;
}

708
/* Broadcast an RTM_NEWNSID / RTM_DELNSID notification for @id to the
 * RTNLGRP_NSID multicast group; on failure the error is recorded on the
 * group's sockets instead.
 */
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

730 731
/* Boot-time initialization: create the net slab cache and cleanup
 * workqueue, set up init_net, register the netns pernet ops and the
 * nsid rtnetlink handlers.  Any failure here panics.
 */
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	/* init_net is static, so its generic array is allocated by hand. */
	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);

774
#ifdef CONFIG_NET_NS
775 776
/* Add @ops to @list and run its init hook on every existing namespace;
 * on any failure, unwind the namespaces already initialized.
 * Caller holds net_mutex.
 */
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

801
/* Remove @ops and run its exit/free hooks on every live namespace.
 * Caller holds net_mutex.
 */
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

813 814
#else

815 816
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	/* !CONFIG_NET_NS: only init_net exists.  Before it is set up,
	 * just queue the ops; net_ns_init() runs them via setup_net().
	 */
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

826
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	/* !CONFIG_NET_NS counterpart: either the ops never ran (drop from
	 * the pending list) or tear them down on init_net only.
	 */
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);
		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}
837 838

#endif /* CONFIG_NET_NS */
839

840 841
static DEFINE_IDA(net_generic_ids);

/* Reserve a generic-array id for @ops (when it requests one) and then
 * register it; the reservation is rolled back on failure.
 * Caller holds net_mutex.
 */
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		/* ids start at 1; 0 is rejected by net_assign_generic(). */
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				/* Preload (may sleep) and retry. */
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		/* Grow future net_generic allocations to fit this id. */
		max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

/* Unregister @ops, wait out RCU users of its per-net data, and release
 * its generic-array id.  Caller holds net_mutex.
 */
static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911
/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	/* Subsystems go before devices (first_device marks the boundary). */
	error =  register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	/* Remember where the device section of pernet_list begins. */
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
973 974

#ifdef CONFIG_NET_NS
975
static struct ns_common *netns_get(struct task_struct *task)
976
{
977 978 979
	struct net *net = NULL;
	struct nsproxy *nsproxy;

980 981
	task_lock(task);
	nsproxy = task->nsproxy;
982 983
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
984
	task_unlock(task);
985

986 987 988 989 990 991
	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
992 993
}

994
static void netns_put(struct ns_common *ns)
995
{
996
	put_net(to_net_ns(ns));
997 998
}

999
static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
1000
{
1001
	struct net *net = to_net_ns(ns);
1002

1003
	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
1004
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
1005 1006
		return -EPERM;

1007
	put_net(nsproxy->net_ns);
1008
	nsproxy->net_ns = get_net(net);
1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019
	return 0;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
};
#endif