/* net/core/net_namespace.c - network namespace management */
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
static DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
A
Alexey Dobriyan 已提交
25
EXPORT_SYMBOL_GPL(net_namespace_list);
26 27

struct net init_net;
28
EXPORT_SYMBOL(init_net);
29

30 31
#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

/* RCU callback: free a replaced net_generic array after the grace period. */
static void net_generic_release(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct net_generic, rcu));
}

/*
 * Store @data in slot @id of @net's generic pointer array, growing the
 * array (RCU-safely) when @id exceeds its current length.  Must be
 * called with net_mutex held; id 0 is reserved.  Returns 0 or -ENOMEM.
 */
static int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	ng = old_ng;
	if (old_ng->len >= id)
		goto assign;

	ng = kzalloc(sizeof(struct net_generic) +
			id * sizeof(void *), GFP_KERNEL);
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside rcu
	 * read section. Besides once set the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	ng->len = id;
	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));

	rcu_assign_pointer(net->gen, ng);
	call_rcu(&old_ng->rcu, net_generic_release);
assign:
	ng->ptr[id - 1] = data;
	return 0;
}

static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err;
	if (ops->id && ops->size) {
		void *data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		err = net_assign_generic(net, *ops->id, data);
		if (err) {
			kfree(data);
			return err;
		}
	}
	if (ops->init)
		return ops->init(net);
	return 0;
}

/* Release the per-namespace private area @ops allocated for @net, if any. */
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (!ops->id || !ops->size)
		return;
	kfree(net_generic(net, *ops->id));
}

/*
 * Invoke the exit hooks of @ops for every namespace queued on
 * @net_exit_list: the per-namespace exit first, then the batched one.
 */
static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *cur;

	if (ops->exit) {
		list_for_each_entry(cur, net_exit_list, exit_list)
			ops->exit(cur);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

/* Free the private areas of @ops for every namespace on @net_exit_list. */
static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *cur;

	if (!(ops->id && ops->size))
		return;
	list_for_each_entry(cur, net_exit_list, exit_list)
		ops_free(ops, cur);
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
131
static __net_init int setup_net(struct net *net)
132 133
{
	/* Must be called with net_mutex held */
134
	const struct pernet_operations *ops, *saved_ops;
135
	int error = 0;
136
	LIST_HEAD(net_exit_list);
137 138

	atomic_set(&net->count, 1);
139

140
#ifdef NETNS_REFCNT_DEBUG
141
	atomic_set(&net->use_count, 0);
142
#endif
143

144
	list_for_each_entry(ops, &pernet_list, list) {
145 146 147
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
148 149 150
	}
out:
	return error;
151

152 153 154 155
out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
156
	list_add(&net->exit_list, &net_exit_list);
157
	saved_ops = ops;
158 159 160
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

161 162
	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
163
		ops_free_list(ops, &net_exit_list);
164 165

	rcu_barrier();
166 167 168
	goto out;
}

static struct net_generic *net_alloc_generic(void)
170
{
171 172 173 174 175 176 177 178 179
	struct net_generic *ng;
	size_t generic_size = sizeof(struct net_generic) +
		INITIAL_NET_GEN_PTRS * sizeof(void *);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = INITIAL_NET_GEN_PTRS;

	return ng;
180 181
}

#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

/*
 * Allocate a zeroed struct net together with its initial generic
 * pointer array.  Returns NULL on allocation failure.
 */
static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
210
#ifdef NETNS_REFCNT_DEBUG
211 212 213 214 215
	if (unlikely(atomic_read(&net->use_count) != 0)) {
		printk(KERN_EMERG "network namespace not free! Usage: %d\n",
			atomic_read(&net->use_count));
		return;
	}
216
#endif
217
	kfree(net->gen);
218 219 220
	kmem_cache_free(net_cachep, net);
}

static struct net *net_create(void)
222
{
223 224
	struct net *net;
	int rv;
225

226 227 228
	net = net_alloc();
	if (!net)
		return ERR_PTR(-ENOMEM);
229
	mutex_lock(&net_mutex);
230 231
	rv = setup_net(net);
	if (rv == 0) {
232
		rtnl_lock();
233
		list_add_tail_rcu(&net->list, &net_namespace_list);
234 235
		rtnl_unlock();
	}
236
	mutex_unlock(&net_mutex);
237 238 239 240 241 242
	if (rv < 0) {
		net_free(net);
		return ERR_PTR(rv);
	}
	return net;
}
/*
 * copy_net_ns - on clone/unshare, either take another reference on
 * the existing namespace or, when CLONE_NEWNET is set, create a
 * brand new one.
 */
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);
	return net_create();
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

254 255
static void cleanup_net(struct work_struct *work)
{
256
	const struct pernet_operations *ops;
257 258
	struct net *net, *tmp;
	LIST_HEAD(net_kill_list);
259
	LIST_HEAD(net_exit_list);
260

261 262 263 264
	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);
265 266 267 268 269

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
270
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
271
		list_del_rcu(&net->list);
272 273
		list_add_tail(&net->exit_list, &net_exit_list);
	}
274 275
	rtnl_unlock();

276 277 278 279 280 281 282
	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

283
	/* Run all of the network namespace exit methods */
284 285 286
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

287
	/* Free the net generic variables */
288 289
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);
290 291 292 293 294 295 296 297 298

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
299 300
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
301 302
		net_free(net);
	}
303
}
304
static DECLARE_WORK(net_cleanup_work, cleanup_net);
305 306 307 308

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
309 310 311 312 313 314 315
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
316 317 318 319 320 321 322 323 324 325 326 327
}
EXPORT_SYMBOL_GPL(__put_net);

#else
/* Without CONFIG_NET_NS only the initial namespace exists. */
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
	if (!(flags & CLONE_NEWNET))
		return old_net;
	return ERR_PTR(-EINVAL);
}
#endif

/*
 * get_net_ns_by_pid - look up the network namespace of the task whose
 * (namespace-local) pid is @pid.  Returns a referenced struct net on
 * success, or ERR_PTR(-ESRCH) when no such task or nsproxy exists.
 */
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		/* nsproxy may be NULL if the task is exiting. */
		nsproxy = task_nsproxy(tsk);
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

/*
 * get_net_ns_by_fd - resolve an open file descriptor to the network
 * namespace it represents.  Returns a referenced struct net on success
 * or ERR_PTR(-EINVAL) when @fd is not a netns proc file.
 */
struct net *get_net_ns_by_fd(int fd)
{
	struct proc_inode *ei;
	struct file *file;
	struct net *net;

	net = ERR_PTR(-EINVAL);
	file = proc_ns_fget(fd);
	if (!file)
		goto out;

	/* Only accept files whose ns_ops identify them as netns files. */
	ei = PROC_I(file->f_dentry->d_inode);
	if (ei->ns_ops != &netns_operations)
		goto out;

	net = get_net(ei->ns);
out:
	if (file)
		fput(file);
	return net;
}

/*
 * Boot-time initialisation: set up the namespace slab cache and the
 * cleanup workqueue (CONFIG_NET_NS only), then run the pernet
 * initialisers for the initial network namespace.
 */
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
409
{
410
	struct net *net;
411
	int error;
412
	LIST_HEAD(net_exit_list);
413 414

	list_add_tail(&ops->list, list);
415
	if (ops->init || (ops->id && ops->size)) {
416
		for_each_net(net) {
417
			error = ops_init(ops, net);
418 419
			if (error)
				goto out_undo;
420
			list_add_tail(&net->exit_list, &net_exit_list);
421 422
		}
	}
423
	return 0;
424 425 426 427

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
428 429
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
430
	return error;
431 432
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
434 435
{
	struct net *net;
436
	LIST_HEAD(net_exit_list);
437 438

	list_del(&ops->list);
439 440 441 442
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
443 444
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
449
{
450 451 452 453 454 455
	int err = 0;
	err = ops_init(ops, &init_net);
	if (err)
		ops_free(ops, &init_net);
	return err;
	
456 457
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
459
{
460 461 462 463
	LIST_HEAD(net_exit_list);
	list_add(&init_net.exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
464
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
	}
	error = __register_pernet_operations(list, ops);
487 488 489 490 491
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}
492 493 494 495 496 497 498 499

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	
	__unregister_pernet_operations(ops);
500
	rcu_barrier();
501 502 503 504
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	/* Subsystems are inserted before first_device, so their init
	 * hooks run before those of pernet devices.
	 */
	error =  register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
539
 *	used when network namespaces are created or destroyed.  In
540 541 542
 *	addition run the exit method for all existing network
 *	namespaces.
 */
543
void unregister_pernet_subsys(struct pernet_operations *ops)
544 545
{
	mutex_lock(&net_mutex);
546
	unregister_pernet_operations(ops);
547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	/* Track the first registered device so subsystems can be
	 * inserted ahead of all pernet devices on the list.
	 */
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	/* If this was the first device, the marker moves to the next one. */
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static void *netns_get(struct task_struct *task)
{
604 605 606
	struct net *net = NULL;
	struct nsproxy *nsproxy;

607
	rcu_read_lock();
608 609 610
	nsproxy = task_nsproxy(task);
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
611
	rcu_read_unlock();
612

613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635
	return net;
}

/* proc_ns put hook: drop the reference taken by netns_get(). */
static void netns_put(void *ns)
{
	put_net(ns);
}

/* proc_ns install hook: switch @nsproxy over to network namespace @ns. */
static int netns_install(struct nsproxy *nsproxy, void *ns)
{
	/* NOTE(review): putting the old ns before getting the new one
	 * presumes the caller already holds a reference on @ns (as
	 * netns_get() takes one) — confirm against the proc_ns callers.
	 */
	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(ns);
	return 0;
}

/* Hooks for the "net" proc namespace file (see get_net_ns_by_fd()). */
const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
};
#endif