/* net_namespace.c — network namespace creation, teardown and pernet
 * operations registration. */
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
static DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
A
Alexey Dobriyan 已提交
25
EXPORT_SYMBOL_GPL(net_namespace_list);
26 27

struct net init_net;
28
EXPORT_SYMBOL(init_net);
29

30 31
#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

32 33 34 35 36 37 38 39 40 41 42 43 44 45
static void unregister_netdevices(struct net *net, struct list_head *list)
{
	struct net_device *dev;
	/* At exit all network devices most be removed from a network
	 * namespace.  Do this in the reverse order of registeration.
	 */
	for_each_netdev_reverse(net, dev) {
		if (dev->rtnl_link_ops)
			dev->rtnl_link_ops->dellink(dev, list);
		else
			unregister_netdevice_queue(dev, list);
	}
}

46 47 48
/*
 * setup_net runs the initializers for the network namespace object.
 */
49
static __net_init int setup_net(struct net *net)
50 51 52
{
	/* Must be called with net_mutex held */
	struct pernet_operations *ops;
53
	int error = 0;
54 55

	atomic_set(&net->count, 1);
56

57
#ifdef NETNS_REFCNT_DEBUG
58
	atomic_set(&net->use_count, 0);
59
#endif
60

61
	list_for_each_entry(ops, &pernet_list, list) {
62 63 64 65 66 67 68 69
		if (ops->init) {
			error = ops->init(net);
			if (error < 0)
				goto out_undo;
		}
	}
out:
	return error;
70

71 72 73 74
out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
75
	list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
76 77
		if (ops->exit)
			ops->exit(net);
78 79 80 81 82 83 84
		if (&ops->list == first_device) {
			LIST_HEAD(dev_kill_list);
			rtnl_lock();
			unregister_netdevices(net, &dev_kill_list);
			unregister_netdevice_many(&dev_kill_list);
			rtnl_unlock();
		}
85
	}
86 87

	rcu_barrier();
88 89 90
	goto out;
}

91
static struct net_generic *net_alloc_generic(void)
92
{
93 94 95 96 97 98 99 100 101
	struct net_generic *ng;
	size_t generic_size = sizeof(struct net_generic) +
		INITIAL_NET_GEN_PTRS * sizeof(void *);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = INITIAL_NET_GEN_PTRS;

	return ng;
102 103
}

#ifdef CONFIG_NET_NS
/* Slab cache for struct net allocations. */
static struct kmem_cache *net_cachep;
/* Single-threaded workqueue that runs cleanup_net(). */
static struct workqueue_struct *netns_wq;
108
static struct net *net_alloc(void)
109
{
110 111 112 113 114 115 116 117
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
118
	if (!net)
119
		goto out_free;
120

121 122 123 124 125 126 127 128 129 130 131
	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
132
#ifdef NETNS_REFCNT_DEBUG
133 134 135 136 137
	if (unlikely(atomic_read(&net->use_count) != 0)) {
		printk(KERN_EMERG "network namespace not free! Usage: %d\n",
			atomic_read(&net->use_count));
		return;
	}
138
#endif
139
	kfree(net->gen);
140 141 142
	kmem_cache_free(net_cachep, net);
}

143
static struct net *net_create(void)
144
{
145 146
	struct net *net;
	int rv;
147

148 149 150
	net = net_alloc();
	if (!net)
		return ERR_PTR(-ENOMEM);
151
	mutex_lock(&net_mutex);
152 153
	rv = setup_net(net);
	if (rv == 0) {
154
		rtnl_lock();
155
		list_add_tail_rcu(&net->list, &net_namespace_list);
156 157
		rtnl_unlock();
	}
158
	mutex_unlock(&net_mutex);
159 160 161 162 163 164
	if (rv < 0) {
		net_free(net);
		return ERR_PTR(rv);
	}
	return net;
}
165

166 167 168 169 170
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);
	return net_create();
171 172
}

/* Dead namespaces queued by __put_net() awaiting cleanup_net(). */
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

176 177 178
static void cleanup_net(struct work_struct *work)
{
	struct pernet_operations *ops;
179 180
	struct net *net, *tmp;
	LIST_HEAD(net_kill_list);
181

182 183 184 185
	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);
186 187 188 189 190

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
191 192
	list_for_each_entry(net, &net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
193 194
	rtnl_unlock();

195 196 197 198 199 200 201
	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

202 203
	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list) {
204 205 206 207 208 209 210 211 212 213 214 215
		if (ops->exit) {
			list_for_each_entry(net, &net_kill_list, cleanup_list)
				ops->exit(net);
		}
		if (&ops->list == first_device) {
			LIST_HEAD(dev_kill_list);
			rtnl_lock();
			list_for_each_entry(net, &net_kill_list, cleanup_list)
				unregister_netdevices(net, &dev_kill_list);
			unregister_netdevice_many(&dev_kill_list);
			rtnl_unlock();
		}
216 217 218 219 220 221 222 223 224 225
	}

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
226 227 228 229
	list_for_each_entry_safe(net, tmp, &net_kill_list, cleanup_list) {
		list_del_init(&net->cleanup_list);
		net_free(net);
	}
230
}
231
static DECLARE_WORK(net_cleanup_work, cleanup_net);
232 233 234 235

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
236 237 238 239 240 241 242
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
243 244 245 246 247 248 249 250 251 252 253 254
}
EXPORT_SYMBOL_GPL(__put_net);

#else
/* !CONFIG_NET_NS: init_net is the only namespace, so requesting a new
 * one can only fail; otherwise hand back the old one unchanged. */
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
	if (flags & CLONE_NEWNET)
		return ERR_PTR(-EINVAL);
	return old_net;
}
#endif

255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274
struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		nsproxy = task_nsproxy(tsk);
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

275 276
static int __init net_ns_init(void)
{
277
	struct net_generic *ng;
278

279
#ifdef CONFIG_NET_NS
280 281 282
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);
283 284 285 286 287

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
288
#endif
289

290 291 292 293 294 295
	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

296
	mutex_lock(&net_mutex);
S
Stephen Hemminger 已提交
297 298
	if (setup_net(&init_net))
		panic("Could not setup the initial network namespace");
299

300
	rtnl_lock();
301
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
302
	rtnl_unlock();
303 304 305 306 307 308 309 310

	mutex_unlock(&net_mutex);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
/* Add @ops to @list and run its init method for every existing
 * namespace, unwinding with exit on failure.  net_mutex must be
 * held by the caller. */
static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	struct net *net, *undo_net;
	int error;

	list_add_tail(&ops->list, list);
	if (ops->init) {
		for_each_net(net) {
			error = ops->init(net);
			if (error)
				goto out_undo;
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	if (ops->exit) {
		for_each_net(undo_net) {
			/* Stop at the namespace whose init just failed. */
			if (net_eq(undo_net, net))
				goto undone;
			ops->exit(undo_net);
		}
	}
undone:
	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;

	list_del(&ops->list);
347 348
	if (ops->exit)
		for_each_net(net)
349 350 351
			ops->exit(net);
}

352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368
#else

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	if (ops->init == NULL)
		return 0;
	return ops->init(&init_net);
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	if (ops->exit)
		ops->exit(&init_net);
}
#endif

369 370
static DEFINE_IDA(net_generic_ids);

371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404
/**
 *      register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error =  register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

/**
 *      unregister_pernet_subsys - unregister a network namespace subsystem
 *	@module: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *module)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(module);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434
int register_pernet_gen_subsys(int *id, struct pernet_operations *ops)
{
	int rv;

	mutex_lock(&net_mutex);
again:
	rv = ida_get_new_above(&net_generic_ids, 1, id);
	if (rv < 0) {
		if (rv == -EAGAIN) {
			ida_pre_get(&net_generic_ids, GFP_KERNEL);
			goto again;
		}
		goto out;
	}
	rv = register_pernet_operations(first_device, ops);
	if (rv < 0)
		ida_remove(&net_generic_ids, *id);
out:
435
	mutex_unlock(&net_mutex);
436 437 438 439 440 441 442 443 444 445 446 447 448
	return rv;
}
EXPORT_SYMBOL_GPL(register_pernet_gen_subsys);

/* Undo register_pernet_gen_subsys(): drop the ops and release @id. */
void unregister_pernet_gen_subsys(int id, struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	ida_remove(&net_generic_ids, id);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_gen_subsys);

449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479
/**
 *      register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered all network namespace init functions are
 *	called for every existing network namespace.  Allowing kernel
 *	modules to have a race free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order with which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503
int register_pernet_gen_device(int *id, struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
again:
	error = ida_get_new_above(&net_generic_ids, 1, id);
	if (error) {
		if (error == -EAGAIN) {
			ida_pre_get(&net_generic_ids, GFP_KERNEL);
			goto again;
		}
		goto out;
	}
	error = register_pernet_operations(&pernet_list, ops);
	if (error)
		ida_remove(&net_generic_ids, *id);
	else if (first_device == &pernet_list)
		first_device = &ops->list;
out:
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_gen_device);

504 505 506 507 508
/**
 *      unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
509
 *	used when network namespaces are created or destroyed.  In
510 511 512 513 514 515 516 517 518 519 520 521
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);
522 523 524 525 526 527 528 529 530 531 532

void unregister_pernet_gen_device(int id, struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	ida_remove(&net_generic_ids, id);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_gen_device);
533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569

static void net_generic_release(struct rcu_head *rcu)
{
	struct net_generic *ng;

	ng = container_of(rcu, struct net_generic, rcu);
	kfree(ng);
}

/* Store @data in slot @id (1-based) of @net's generic pointer array,
 * growing the array RCU-safely when @id exceeds its current length.
 * Must be called with net_mutex held.  Returns 0 or -ENOMEM. */
int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

	ng = old_ng = net->gen;
	if (old_ng->len >= id)
		goto assign;

	ng = kzalloc(sizeof(struct net_generic) +
			id * sizeof(void *), GFP_KERNEL);
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * The net_generic explores the net->gen array inside rcu
	 * read section. Besides once set the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	ng->len = id;
	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void *));

	rcu_assign_pointer(net->gen, ng);
	call_rcu(&old_ng->rcu, net_generic_release);
assign:
	ng->ptr[id - 1] = data;
	return 0;
}
EXPORT_SYMBOL_GPL(net_assign_generic);