/* net/core/net_namespace.c - network namespace creation and teardown */

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/proc_fs.h>
#include <linux/file.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
/* Boundary marker: entries before *first_device are subsystems, entries
 * from it onward are devices (see register_pernet_device() below). */
static struct list_head *first_device = &pernet_list;
/* Serializes namespace setup/teardown and changes to pernet_list. */
static DEFINE_MUTEX(net_mutex);

/* All live network namespaces.  Writers hold net_mutex + rtnl_lock and
 * use the _rcu list ops; readers may traverse under RCU. */
LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

/* The initial network namespace, set up at boot by net_ns_init(). */
struct net init_net;
EXPORT_SYMBOL(init_net);

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

32 33 34 35 36 37 38
/*
 * net_assign_generic - publish a subsystem's per-namespace data pointer
 * @net:  namespace being populated
 * @id:   1-based slot index (handed out by register_pernet_operations())
 * @data: subsystem-private data to store in slot @id
 *
 * Grows the net->gen pointer array if @id is beyond its current length,
 * then stores @data at ptr[id - 1].  Must be called with net_mutex held;
 * lookups run under RCU (see the synchronisation notes below).
 * Returns 0 on success or -ENOMEM.
 */
static int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);	/* ids start at 1; 0 would underflow below */

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	ng = old_ng;
	/* Fast path: the array already has a slot for this id. */
	if (old_ng->len >= id)
		goto assign;

	ng = kzalloc(sizeof(struct net_generic) +
			id * sizeof(void *), GFP_KERNEL);
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside rcu
	 * read section. Besides once set the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	ng->len = id;
	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, rcu);
assign:
	ng->ptr[id - 1] = data;
	return 0;
}

71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err;
	if (ops->id && ops->size) {
		void *data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		err = net_assign_generic(net, *ops->id, data);
		if (err) {
			kfree(data);
			return err;
		}
	}
	if (ops->init)
		return ops->init(net);
	return 0;
}

/* Release the per-net data blob that ops_init() allocated for @ops. */
static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (!ops->id || !ops->size)
		return;
	kfree(net_generic(net, *ops->id));
}

98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119
/*
 * Invoke @ops' exit hooks for the namespaces on @net_exit_list:
 * ->exit once per namespace first, then ->exit_batch once for the
 * whole list.
 */
static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *cur;

	if (ops->exit) {
		list_for_each_entry(cur, net_exit_list, exit_list)
			ops->exit(cur);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

/* Free @ops' per-net data for every namespace on @net_exit_list. */
static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *cur;

	if (!ops->size || !ops->id)
		return;
	list_for_each_entry(cur, net_exit_list, exit_list)
		ops_free(ops, cur);
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	/* One active reference for the caller plus one passive reference
	 * (dropped via net_drop_ns() once the namespace is dead). */
	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);

#ifdef NETNS_REFCNT_DEBUG
	atomic_set(&net->use_count, 0);
#endif

	/* Run every registered pernet init in registration order. */
	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	/* Wait for RCU callbacks queued by the exit paths before the
	 * caller frees the half-built namespace. */
	rcu_barrier();
	goto out;
}

162
static struct net_generic *net_alloc_generic(void)
163
{
164 165 166 167 168 169 170 171 172
	struct net_generic *ng;
	size_t generic_size = sizeof(struct net_generic) +
		INITIAL_NET_GEN_PTRS * sizeof(void *);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = INITIAL_NET_GEN_PTRS;

	return ng;
173 174
}

175 176 177 178
#ifdef CONFIG_NET_NS
/* Slab cache backing struct net allocations (created in net_ns_init()). */
static struct kmem_cache *net_cachep;
/* Workqueue on which cleanup_net() runs (created in net_ns_init()). */
static struct workqueue_struct *netns_wq;

179
static struct net *net_alloc(void)
180
{
181 182 183 184 185 186 187 188
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
189
	if (!net)
190
		goto out_free;
191

192 193 194 195 196 197 198 199 200 201 202
	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

/*
 * Free a dead namespace: its generic pointer array and the struct net
 * itself.  Reached via net_drop_ns() once the passive refcount hits
 * zero, after cleanup_net()'s rcu_barrier() — no readers remain.
 */
static void net_free(struct net *net)
{
#ifdef NETNS_REFCNT_DEBUG
	/* Debug leak check: refuse to free while the use counter is
	 * nonzero, and complain loudly instead. */
	if (unlikely(atomic_read(&net->use_count) != 0)) {
		printk(KERN_EMERG "network namespace not free! Usage: %d\n",
			atomic_read(&net->use_count));
		return;
	}
#endif
	kfree(net->gen);
	kmem_cache_free(net_cachep, net);
}

214 215 216 217 218 219 220
void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}

/*
 * copy_net_ns - clone-time namespace handling
 * @flags:   clone flags; only CLONE_NEWNET matters here
 * @old_net: the parent's network namespace
 *
 * Without CLONE_NEWNET, just take another reference on @old_net.
 * Otherwise allocate a fresh namespace, run all pernet initializers
 * under net_mutex, and publish it on net_namespace_list.  Returns the
 * namespace or an ERR_PTR on failure.
 */
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	net = net_alloc();
	if (!net)
		return ERR_PTR(-ENOMEM);
	mutex_lock(&net_mutex);
	rv = setup_net(net);
	if (rv == 0) {
		/* Publish under rtnl_lock; readers traverse under RCU. */
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		/* setup_net() already unwound its work; drop the passive
		 * reference to free the struct. */
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}
246

/* Namespaces whose last active ref is gone, awaiting cleanup_net(). */
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

/*
 * cleanup_net - workqueue handler that tears down dead namespaces
 *
 * Snapshots everything queued on cleanup_list, unhooks those
 * namespaces from net_namespace_list, runs the pernet exit methods in
 * reverse registration order, and finally drops the passive reference
 * that frees each struct net.
 */
static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	LIST_HEAD(net_kill_list);
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		net_drop_ns(net);
	}
}
static DECLARE_WORK(net_cleanup_work, cleanup_net);

/*
 * Called when the last active reference on @net is dropped.  The
 * actual teardown must run in process context, so queue the namespace
 * for cleanup_net() on the netns workqueue.
 */
void __put_net(struct net *net)
{
	unsigned long irqflags;

	spin_lock_irqsave(&cleanup_list_lock, irqflags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, irqflags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

/*
 * get_net_ns_by_fd - look up a namespace from a /proc/<pid>/ns/net fd
 * @fd: file descriptor referring to a proc namespace file
 *
 * Returns the namespace with its refcount bumped, or an ERR_PTR:
 * -EINVAL if the fd is a proc ns file of a different type, or the
 * error from proc_ns_fget() if it is not a proc ns file at all.
 */
struct net *get_net_ns_by_fd(int fd)
{
	struct proc_inode *ei;
	struct file *file;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ei = PROC_I(file->f_dentry->d_inode);
	/* Only accept fds that actually refer to a *network* namespace. */
	if (ei->ns_ops == &netns_operations)
		net = get_net(ei->ns);
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

335 336 337 338 339 340 341
#else
/* !CONFIG_NET_NS: creating a new network namespace is impossible. */
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
	return (flags & CLONE_NEWNET) ? ERR_PTR(-EINVAL) : old_net;
}
342 343 344 345 346

/* !CONFIG_NET_NS: there are no namespace fds to look up. */
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
347 348
#endif

/*
 * get_net_ns_by_pid - look up a network namespace by process id
 * @pid: virtual pid of the task whose namespace is wanted
 *
 * Returns the namespace with its refcount bumped, or ERR_PTR(-ESRCH)
 * if no such task (or its nsproxy) can be found.
 */
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		nsproxy = task_nsproxy(tsk);
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

/*
 * net_ns_init - boot-time initialisation of the netns machinery
 *
 * Creates the struct net slab cache and the cleanup workqueue
 * (CONFIG_NET_NS only), allocates init_net's generic array, runs all
 * pernet initializers registered so far for init_net, and publishes
 * init_net on net_namespace_list.  Panics on any failure — the kernel
 * cannot run without the initial namespace.
 */
static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	return 0;
}

/* pure_initcall: runs very early, before subsystem initcalls. */
pure_initcall(net_ns_init);

405
#ifdef CONFIG_NET_NS
#ifdef CONFIG_NET_NS
/*
 * CONFIG_NET_NS flavour: add @ops to @list and, if it has anything to
 * initialise, run ops_init() for every existing namespace.  On error,
 * unwind by running the exit/free paths for the namespaces already
 * initialised.  Caller holds net_mutex.
 */
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			/* Track for possible unwind below. */
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

432
static void __unregister_pernet_operations(struct pernet_operations *ops)
433 434
{
	struct net *net;
435
	LIST_HEAD(net_exit_list);
436 437

	list_del(&ops->list);
438 439 440 441
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
442 443
}

444 445
#else

446 447
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
448
{
449 450 451 452 453 454
	int err = 0;
	err = ops_init(ops, &init_net);
	if (err)
		ops_free(ops, &init_net);
	return err;
	
455 456
}

/*
 * !CONFIG_NET_NS flavour: run @ops' exit hooks and free its per-net
 * data for init_net, the only namespace that exists.
 */
static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);
	list_add(&init_net.exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}
464 465

#endif /* CONFIG_NET_NS */
466

/* Allocator for the slot ids stored behind pernet_operations->id. */
static DEFINE_IDA(net_generic_ids);

/*
 * Common register path: allocate a generic-array id for @ops if it
 * wants one, then run the config-specific registration.  On failure,
 * wait out in-flight RCU callbacks and return the id.  Caller holds
 * net_mutex.
 */
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			/* The ida needs preloaded memory; replenish and
			 * retry the allocation. */
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	
	__unregister_pernet_operations(ops);
499
	rcu_barrier();
500 501 502 503
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537
/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error =  register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int rv;

	mutex_lock(&net_mutex);
	rv = register_pernet_operations(&pernet_list, ops);
	/* First device registered: remember where devices start. */
	if (!rv && first_device == &pernet_list)
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return rv;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	/* If this was the first device, advance the subsystem/device
	 * boundary marker past it before removal. */
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
599 600 601 602

#ifdef CONFIG_NET_NS
/*
 * proc_ns_operations ->get hook: take a counted reference to @task's
 * network namespace, or return NULL if the task has no nsproxy.
 */
static void *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	rcu_read_lock();
	nsproxy = task_nsproxy(task);
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	rcu_read_unlock();

	return net;
}

/* proc_ns_operations ->put hook: drop the reference netns_get() took. */
static void netns_put(void *ns)
{
	put_net(ns);
}

/*
 * proc_ns_operations ->install hook (setns): swap the nsproxy's
 * network namespace for @ns, dropping the old reference and taking a
 * new one.  Always returns 0.
 */
static int netns_install(struct nsproxy *nsproxy, void *ns)
{
	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(ns);
	return 0;
}

/* Operations backing /proc/<pid>/ns/net files. */
const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
};
#endif