#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/proc_fs.h>
#include <linux/file.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
static DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net;
EXPORT_SYMBOL(init_net);

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

/*
 * Store @data in @net's generic pointer array at slot @id (ids are
 * 1-based; id 0 is rejected).  Grows the array when @id is beyond its
 * current length; the old copy is reclaimed after an RCU grace period
 * so lockless readers (net_generic()) stay safe.
 *
 * Returns 0 on success, -ENOMEM if a larger array cannot be allocated.
 * Caller must hold net_mutex.
 */
static int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	ng = old_ng;
	/* Current array already has room for this id: just assign. */
	if (old_ng->len >= id)
		goto assign;

	ng = kzalloc(sizeof(struct net_generic) +
			id * sizeof(void *), GFP_KERNEL);
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside rcu
	 * read section. Besides once set the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	ng->len = id;
	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, rcu);
assign:
	/* Slot ids are 1-based: ptr[0] holds id 1. */
	ng->ptr[id - 1] = data;
	return 0;
}

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err;
	if (ops->id && ops->size) {
		void *data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		err = net_assign_generic(net, *ops->id, data);
		if (err) {
			kfree(data);
			return err;
		}
	}
	if (ops->init)
		return ops->init(net);
	return 0;
}

/* Release the per-net storage slot owned by @ops, if it declared one. */
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (!ops->id || !ops->size)
		return;

	kfree(net_generic(net, *ops->id));
}

/* Run @ops's per-net ->exit on each namespace queued on the list,
 * then its batched ->exit_batch once for the whole list.
 */
static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *cur;

	if (ops->exit)
		list_for_each_entry(cur, net_exit_list, exit_list)
			ops->exit(cur);

	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

/* Free @ops's per-net storage for every namespace queued on the list. */
static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *cur;

	if (!(ops->size && ops->id))
		return;

	list_for_each_entry(cur, net_exit_list, exit_list)
		ops_free(ops, cur);
}

/*
 * setup_net runs the initializers for the network namespace object.
 * Returns 0 on success; on failure every ops whose init succeeded is
 * unwound (exit then free, both in reverse registration order).
 */
static __net_init int setup_net(struct net *net)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);

#ifdef NETNS_REFCNT_DEBUG
	atomic_set(&net->use_count, 0);
#endif

	/* Run every registered pernet init hook, in registration order. */
	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	/* Second reverse pass: free per-net storage only after ALL exits ran. */
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	/* Wait out pending RCU callbacks before the caller frees @net. */
	rcu_barrier();
	goto out;
}

static struct net_generic *net_alloc_generic(void)
162
{
163 164 165 166 167 168 169 170 171
	struct net_generic *ng;
	size_t generic_size = sizeof(struct net_generic) +
		INITIAL_NET_GEN_PTRS * sizeof(void *);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = INITIAL_NET_GEN_PTRS;

	return ng;
172 173
}

174 175 176 177
#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
179
{
180 181 182 183 184 185 186 187
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
188
	if (!net)
189
		goto out_free;
190

191 192 193 194 195 196 197 198 199 200 201
	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

/* Final teardown of a namespace: release the generic array and the
 * struct net itself.  Callers run rcu_barrier() first (see cleanup_net
 * and setup_net's error path), so plain kfree of net->gen is safe.
 */
static void net_free(struct net *net)
{
#ifdef NETNS_REFCNT_DEBUG
	/* Leak check: refuse to free while use_count references remain. */
	if (unlikely(atomic_read(&net->use_count) != 0)) {
		printk(KERN_EMERG "network namespace not free! Usage: %d\n",
			atomic_read(&net->use_count));
		return;
	}
#endif
	kfree(net->gen);
	kmem_cache_free(net_cachep, net);
}

/*
 * clone(2)/unshare(2) helper: create a fresh network namespace when
 * CLONE_NEWNET is requested, otherwise just take another reference on
 * @old_net.  Returns the namespace or an ERR_PTR on failure.
 */
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	net = net_alloc();
	if (!net)
		return ERR_PTR(-ENOMEM);
	mutex_lock(&net_mutex);
	rv = setup_net(net);
	if (rv == 0) {
		/* Publish on the global list under both net_mutex and rtnl. */
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		/* setup_net already unwound the ops and ran rcu_barrier(). */
		net_free(net);
		return ERR_PTR(rv);
	}
	return net;
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

/* Deferred namespace teardown, run from the netns workqueue.
 * Processes every namespace queued on cleanup_list by __put_net().
 */
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	LIST_HEAD(net_kill_list);
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		net_free(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

/* Called when the last reference to @net is dropped.  Defers the real
 * teardown to cleanup_net() on the netns workqueue; irqsave locking is
 * used because callers may hold spinlocks or have IRQs disabled.
 */
void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct proc_inode *ei;
	struct file *file;
	struct net *net;

	file = proc_ns_fget(fd);
314 315
	if (IS_ERR(file))
		return ERR_CAST(file);
316 317

	ei = PROC_I(file->f_dentry->d_inode);
318 319 320 321
	if (ei->ns_ops == &netns_operations)
		net = get_net(ei->ns);
	else
		net = ERR_PTR(-EINVAL);
322

323
	fput(file);
324 325 326
	return net;
}

327 328 329 330 331 332 333
#else
/* Without CONFIG_NET_NS a new network namespace cannot be created. */
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
	if (flags & CLONE_NEWNET)
		return ERR_PTR(-EINVAL);

	return old_net;
}


/* Namespace file descriptors do not exist without CONFIG_NET_NS. */
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}

#endif

/* Take a reference on the network namespace of the task with the given
 * (virtual) pid.  Returns ERR_PTR(-ESRCH) when no such task, or when
 * its nsproxy is already gone.
 */
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct net *net = ERR_PTR(-ESRCH);
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task) {
		struct nsproxy *proxy = task_nsproxy(task);

		if (proxy)
			net = get_net(proxy->net_ns);
	}
	rcu_read_unlock();

	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

/* Boot-time setup: allocate the namespace slab/workqueue (when
 * namespaces are enabled), then initialize and publish init_net.
 */
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");

	/* Make init_net visible on the global namespace list. */
	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
/*
 * Add @ops to @list and run its init hook on every existing namespace.
 * On failure the namespaces already initialized are unwound and @ops is
 * removed again.  Called with net_mutex held.
 */
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	/* Only walk the namespaces if @ops actually has per-net work. */
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			/* Track each initialized net for possible unwind. */
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
425 426
{
	struct net *net;
427
	LIST_HEAD(net_exit_list);
428 429

	list_del(&ops->list);
430 431 432 433
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
434 435
}

436 437
#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
440
{
441 442 443 444 445 446
	int err = 0;
	err = ops_init(ops, &init_net);
	if (err)
		ops_free(ops, &init_net);
	return err;
	
447 448
}

449
static void __unregister_pernet_operations(struct pernet_operations *ops)
450
{
451 452 453 454
	LIST_HEAD(net_exit_list);
	list_add(&init_net.exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
455
}
456 457

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

/*
 * Allocate a generic-array id for @ops (when it requested one via
 * ->id) and register it on @list.  Called with net_mutex held.
 * On registration failure the id is released again.
 */
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		/* ids start at 1; 0 is rejected by net_assign_generic(). */
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				/* Preload more ida memory and retry. */
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		/* Let pending RCU callbacks finish before recycling the id. */
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}


static void unregister_pernet_operations(struct pernet_operations *ops)
{
	
	__unregister_pernet_operations(ops);
491
	rcu_barrier();
492 493 494 495
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529
/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int rv;

	mutex_lock(&net_mutex);
	rv = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);

	return rv;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);


/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
530
 *	used when network namespaces are created or destroyed.  In
531 532 533
 *	addition run the exit method for all existing network
 *	namespaces.
 */
534
void unregister_pernet_subsys(struct pernet_operations *ops)
535 536
{
	mutex_lock(&net_mutex);
537
	unregister_pernet_operations(ops);
538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int rv;

	mutex_lock(&net_mutex);
	rv = register_pernet_operations(&pernet_list, ops);
	/* Remember where the device section of pernet_list begins. */
	if (!rv && first_device == &pernet_list)
		first_device = &ops->list;
	mutex_unlock(&net_mutex);

	return rv;
}
EXPORT_SYMBOL_GPL(register_pernet_device);


/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	/* Keep first_device pointing at a still-registered entry. */
	if (first_device == &ops->list)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);


#ifdef CONFIG_NET_NS
static void *netns_get(struct task_struct *task)
{
595 596 597
	struct net *net = NULL;
	struct nsproxy *nsproxy;

598
	rcu_read_lock();
599 600 601
	nsproxy = task_nsproxy(task);
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
602
	rcu_read_unlock();
603

604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626
	return net;
}

/* proc_ns hook: drop a reference taken by netns_get(). */
static void netns_put(void *ns)
{
	put_net(ns);
}


/* proc_ns hook for setns(2): replace the task's net namespace with @ns,
 * dropping the old reference and taking a new one.
 * NOTE(review): no capability check is visible here — confirm callers
 * enforce the required privilege before this runs.
 */
static int netns_install(struct nsproxy *nsproxy, void *ns)
{
	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(ns);
	return 0;
}


/* Hooks backing /proc/<pid>/ns/net, used by setns(2) and ns fds. */
const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
};
#endif