#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = max_gen_ptrs;

	return ng;
}

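/* Note: max_gen_ptrs only grows (under net_mutex, via
 * register_pernet_operations()), so a net_generic allocated here always
 * has room for every id handed out so far.
 */
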
static int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	ng = old_ng;
	if (old_ng->len >= id)
		goto assign;

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an rcu
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, rcu);
assign:
	ng->ptr[id - 1] = data;
	return 0;
}
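
/* Illustrative sketch (hypothetical foo_* names): a pernet subsystem that
 * set ops->id and ops->size reads its per-namespace data back with
 * net_generic(), e.g.
 *
 *	struct foo_net *fn = net_generic(net, foo_net_id);
 *
 * where foo_net_id was filled in at registration time.
 */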

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		int id = *ops->id;
		kfree(net_generic(net, id));
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return id 0 (idr_for_each() would not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool
 * alloc is set to true, so the caller knows that the new id must be
 * notified via rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc;
	int id;

	spin_lock_bh(&net->nsid_lock);
	alloc = atomic_read(&peer->count) != 0;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	spin_lock_bh(&net->nsid_lock);
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
	spin_unlock_bh(&net->nsid_lock);
	rcu_read_unlock();

	return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;
out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

#ifdef CONFIG_NET_NS
static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

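/* net->count tracks active users of the namespace, while net->passive
 * keeps the struct net and net->gen memory itself alive for rcu-protected
 * readers.  net_drop_ns() releases one passive reference.
 */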
void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		dec_net_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}

	get_user_ns(user_ns);

	mutex_lock(&net_mutex);
	net->ucounts = ucounts;
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

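/* cleanup_net() runs in process context on the "netns" workqueue and
 * tears down, in one batch, every namespace queued on cleanup_list by
 * __put_net().
 */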
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id;

			spin_lock_bh(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_bh(&tmp->nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_bh(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_bh(&net->nsid_lock);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);
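
/* __put_net() is reached from put_net() (include/net/net_namespace.h)
 * once the last net->count reference is dropped; the actual teardown is
 * deferred to the workqueue above.
 */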

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);
	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);
	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]		= { .type = NLA_UNSPEC },
	[NETNSA_NSID]		= { .type = NLA_S32 },
	[NETNSA_PID]		= { .type = NLA_U32 },
	[NETNSA_FD]		= { .type = NLA_U32 },
};

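/* Expected RTM_NEWNSID request layout: a struct rtgenmsg header followed
 * by NETNSA_NSID, plus either NETNSA_PID or NETNSA_FD designating the
 * peer namespace.
 */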
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;

	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif
	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);
		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order in which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error =  register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
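
/* Typical caller pattern (illustrative sketch; the foo_* names are
 * hypothetical):
 *
 *	static int foo_net_id;
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,
 *		.exit = foo_net_exit,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return register_pernet_subsys(&foo_net_ops);
 *	}
 */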

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order in which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);
	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);
	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif