/*
 * net_namespace.c - management of network namespace objects and the
 * registry of per-namespace (pernet) subsystems and devices.
 */
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
static DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
A
Alexey Dobriyan 已提交
25
EXPORT_SYMBOL_GPL(net_namespace_list);
26 27

struct net init_net;
28
EXPORT_SYMBOL(init_net);
29

30 31
#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

32 33 34 35 36 37 38
static int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

E
Eric Dumazet 已提交
39 40 41
	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	ng = old_ng;
42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64
	if (old_ng->len >= id)
		goto assign;

	ng = kzalloc(sizeof(struct net_generic) +
			id * sizeof(void *), GFP_KERNEL);
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside rcu
	 * read section. Besides once set the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	ng->len = id;
	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));

	rcu_assign_pointer(net->gen, ng);
65
	kfree_rcu(old_ng, rcu);
66 67 68 69 70
assign:
	ng->ptr[id - 1] = data;
	return 0;
}

71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err;
	if (ops->id && ops->size) {
		void *data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		err = net_assign_generic(net, *ops->id, data);
		if (err) {
			kfree(data);
			return err;
		}
	}
	if (ops->init)
		return ops->init(net);
	return 0;
}

/* Release the per-namespace private area allocated by ops_init(). */
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (!ops->id || !ops->size)
		return;

	kfree(net_generic(net, *ops->id));
}

98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119
static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

/* Free @ops's generic private data for every namespace on the list. */
static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (!ops->size || !ops->id)
		return;

	list_for_each_entry(net, net_exit_list, exit_list)
		ops_free(ops, net);
}

120 121 122
/*
 * setup_net runs the initializers for the network namespace object.
 */
123
static __net_init int setup_net(struct net *net)
124 125
{
	/* Must be called with net_mutex held */
126
	const struct pernet_operations *ops, *saved_ops;
127
	int error = 0;
128
	LIST_HEAD(net_exit_list);
129 130

	atomic_set(&net->count, 1);
131
	atomic_set(&net->passive, 1);
132
	net->dev_base_seq = 1;
133

134
#ifdef NETNS_REFCNT_DEBUG
135
	atomic_set(&net->use_count, 0);
136
#endif
137

138
	list_for_each_entry(ops, &pernet_list, list) {
139 140 141
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
142 143 144
	}
out:
	return error;
145

146 147 148 149
out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
150
	list_add(&net->exit_list, &net_exit_list);
151
	saved_ops = ops;
152 153 154
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

155 156
	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
157
		ops_free_list(ops, &net_exit_list);
158 159

	rcu_barrier();
160 161 162
	goto out;
}

163
static struct net_generic *net_alloc_generic(void)
164
{
165 166 167 168 169 170 171 172 173
	struct net_generic *ng;
	size_t generic_size = sizeof(struct net_generic) +
		INITIAL_NET_GEN_PTRS * sizeof(void *);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = INITIAL_NET_GEN_PTRS;

	return ng;
174 175
}

#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;		/* slab for struct net */
static struct workqueue_struct *netns_wq;	/* runs cleanup_net() */

/*
 * net_alloc - allocate a zeroed struct net with its initial generic
 * pointer array attached.  Returns NULL on allocation failure.
 */
static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
204
#ifdef NETNS_REFCNT_DEBUG
205 206 207 208 209
	if (unlikely(atomic_read(&net->use_count) != 0)) {
		printk(KERN_EMERG "network namespace not free! Usage: %d\n",
			atomic_read(&net->use_count));
		return;
	}
210
#endif
211
	kfree(net->gen);
212 213 214
	kmem_cache_free(net_cachep, net);
}

215 216 217 218 219 220 221
void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}

222
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
223
{
224 225
	struct net *net;
	int rv;
226

227 228 229
	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

230 231 232
	net = net_alloc();
	if (!net)
		return ERR_PTR(-ENOMEM);
233
	mutex_lock(&net_mutex);
234 235
	rv = setup_net(net);
	if (rv == 0) {
236
		rtnl_lock();
237
		list_add_tail_rcu(&net->list, &net_namespace_list);
238 239
		rtnl_unlock();
	}
240
	mutex_unlock(&net_mutex);
241
	if (rv < 0) {
242
		net_drop_ns(net);
243 244 245 246
		return ERR_PTR(rv);
	}
	return net;
}
247

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

251 252
static void cleanup_net(struct work_struct *work)
{
253
	const struct pernet_operations *ops;
254 255
	struct net *net, *tmp;
	LIST_HEAD(net_kill_list);
256
	LIST_HEAD(net_exit_list);
257

258 259 260 261
	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);
262 263 264 265 266

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
267
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
268
		list_del_rcu(&net->list);
269 270
		list_add_tail(&net->exit_list, &net_exit_list);
	}
271 272
	rtnl_unlock();

273 274 275 276 277 278 279
	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

280
	/* Run all of the network namespace exit methods */
281 282 283
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

284
	/* Free the net generic variables */
285 286
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);
287 288 289 290 291 292 293 294 295

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
296 297
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
298
		net_drop_ns(net);
299
	}
300
}
301
static DECLARE_WORK(net_cleanup_work, cleanup_net);
302 303 304 305

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
306 307 308 309 310 311 312
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
313 314 315
}
EXPORT_SYMBOL_GPL(__put_net);

316 317 318 319 320 321 322
struct net *get_net_ns_by_fd(int fd)
{
	struct proc_inode *ei;
	struct file *file;
	struct net *net;

	file = proc_ns_fget(fd);
323 324
	if (IS_ERR(file))
		return ERR_CAST(file);
325 326

	ei = PROC_I(file->f_dentry->d_inode);
327 328 329 330
	if (ei->ns_ops == &netns_operations)
		net = get_net(ei->ns);
	else
		net = ERR_PTR(-EINVAL);
331

332
	fput(file);
333 334 335
	return net;
}

336 337 338 339 340 341 342
#else
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
	if (flags & CLONE_NEWNET)
		return ERR_PTR(-EINVAL);
	return old_net;
}
343 344 345 346 347

struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
348 349
#endif

350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		nsproxy = task_nsproxy(tsk);
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

370 371
static int __init net_ns_init(void)
{
372
	struct net_generic *ng;
373

374
#ifdef CONFIG_NET_NS
375 376 377
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);
378 379 380 381 382

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
383
#endif
384

385 386 387 388 389 390
	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

391
	mutex_lock(&net_mutex);
S
Stephen Hemminger 已提交
392 393
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");
394

395
	rtnl_lock();
396
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
397
	rtnl_unlock();
398 399 400 401 402 403 404 405

	mutex_unlock(&net_mutex);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
/*
 * Add @ops to @list and run its constructor in every existing
 * namespace.  On failure the already-initialized namespaces are torn
 * back down and @ops is unlinked again.  Caller holds net_mutex.
 */
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

433
static void __unregister_pernet_operations(struct pernet_operations *ops)
434 435
{
	struct net *net;
436
	LIST_HEAD(net_exit_list);
437 438

	list_del(&ops->list);
439 440 441 442
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
443 444
}

445 446
#else

447 448
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
449
{
450 451 452 453 454 455
	int err = 0;
	err = ops_init(ops, &init_net);
	if (err)
		ops_free(ops, &init_net);
	return err;
	
456 457
}

458
static void __unregister_pernet_operations(struct pernet_operations *ops)
459
{
460 461 462 463
	LIST_HEAD(net_exit_list);
	list_add(&init_net.exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
464
}
465 466

#endif /* CONFIG_NET_NS */
/* Allocator for the slot ids handed out via pernet_operations->id. */
static DEFINE_IDA(net_generic_ids);

470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
	}
	error = __register_pernet_operations(list, ops);
487 488 489 490 491
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}
492 493 494 495 496 497 498 499

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	
	__unregister_pernet_operations(ops);
500
	rcu_barrier();
501 502 503 504
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538
/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error =  register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
539
 *	used when network namespaces are created or destroyed.  In
540 541 542
 *	addition run the exit method for all existing network
 *	namespaces.
 */
543
void unregister_pernet_subsys(struct pernet_operations *ops)
544 545
{
	mutex_lock(&net_mutex);
546
	unregister_pernet_operations(ops);
547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int rc;

	mutex_lock(&net_mutex);
	rc = register_pernet_operations(&pernet_list, ops);
	if (rc == 0 && first_device == &pernet_list)
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
/* proc_ns_operations hook: grab @task's netns with a reference. */
static void *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	rcu_read_lock();
	nsproxy = task_nsproxy(task);
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	rcu_read_unlock();

	return net;
}

static void netns_put(void *ns)
{
	put_net(ns);
}

/* setns(2) support: switch @nsproxy's netns over to @ns. */
static int netns_install(struct nsproxy *nsproxy, void *ns)
{
	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(ns);
	return 0;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
};
#endif