/*
 * NETLINK      Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 * 				Patrick McHardy <kaber@trash.net>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				 - inc module use count of module that owns
 * 				   the kernel socket in case userspace opens
 * 				   socket of same protocol
 * 				 - remove all module support, since netlink is
 * 				   mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"

struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_S_CONGESTED		0x0

/* flags */
#define NETLINK_F_KERNEL_SOCKET		0x1
#define NETLINK_F_RECV_PKTINFO		0x2
#define NETLINK_F_BROADCAST_SEND_ERROR	0x4
#define NETLINK_F_RECV_NO_ENOBUFS	0x8
#define NETLINK_F_LISTEN_ALL_NSID	0x10
#define NETLINK_F_CAP_ACK		0x20

static inline int netlink_is_kernel(struct sock *sk)
{
	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
}

struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with per bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired, either during or after the socket has been removed from
 * the list and after an RCU grace period.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));

static BLOCKING_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static const struct rhashtable_params netlink_rhashtable_params;

static inline u32 netlink_group_mask(u32 group)
{
	return group ? 1 << (group - 1) : 0;
}

static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
					   gfp_t gfp_mask)
{
	unsigned int len = skb_end_offset(skb);
	struct sk_buff *new;

	new = alloc_skb(len, gfp_mask);
	if (new == NULL)
		return NULL;

	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;

	memcpy(skb_put(new, len), skb->data, len);
	return new;
}

int netlink_add_tap(struct netlink_tap *nt)
{
	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
		return -EINVAL;

	spin_lock(&netlink_tap_lock);
	list_add_rcu(&nt->list, &netlink_tap_all);
	spin_unlock(&netlink_tap_lock);

	__module_get(nt->module);

	return 0;
}
EXPORT_SYMBOL_GPL(netlink_add_tap);

static int __netlink_remove_tap(struct netlink_tap *nt)
{
	bool found = false;
	struct netlink_tap *tmp;

	spin_lock(&netlink_tap_lock);

	list_for_each_entry(tmp, &netlink_tap_all, list) {
		if (nt == tmp) {
			list_del_rcu(&nt->list);
			found = true;
			goto out;
		}
	}

	pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
	spin_unlock(&netlink_tap_lock);

	if (found)
		module_put(nt->module);

	return found ? 0 : -ENODEV;
}

int netlink_remove_tap(struct netlink_tap *nt)
{
	int ret;

	ret = __netlink_remove_tap(nt);
	synchronize_net();

	return ret;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);
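
/*
 * Illustrative sketch (not part of this file): a monitoring driver would
 * typically embed a struct netlink_tap, point ->dev at its net_device and
 * ->module at THIS_MODULE, then call netlink_add_tap() when the device is
 * created and netlink_remove_tap() on teardown, so that every skb passing
 * netlink_deliver_tap() below is mirrored to that device.
 */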

static bool netlink_filter_tap(const struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_ROUTE:
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_NFLOG:
	case NETLINK_XFRM:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
		return true;
	}

	return false;
}

static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;
	int ret = -ENOMEM;

	dev_hold(dev);

	if (is_vmalloc_addr(skb->head))
		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);
	if (nskb) {
		nskb->dev = dev;
		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);
	}

	dev_put(dev);
	return ret;
}

static void __netlink_deliver_tap(struct sk_buff *skb)
{
	int ret;
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))
		return;

	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);
		if (unlikely(ret))
			break;
	}
}

static void netlink_deliver_tap(struct sk_buff *skb)
{
	rcu_read_lock();

	if (unlikely(!list_empty(&netlink_tap_all)))
		__netlink_deliver_tap(skb);

	rcu_read_unlock();
}

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
{
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(skb);
}

static void netlink_overrun(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_S_CONGESTED,
				      &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
		}
	}
	atomic_inc(&sk->sk_drops);
}

static void netlink_rcv_wake(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
}

static void netlink_skb_destructor(struct sk_buff *skb)
{
	if (is_vmalloc_addr(skb->head)) {
		if (!skb->cloned ||
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
			vfree(skb->head);

		skb->head = NULL;
	}
	if (skb->sk != NULL)
		sock_rfree(skb);
}

static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

static void netlink_sock_destruct(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		if (nlk->cb.done)
			nlk->cb.done(&nlk->cb);
		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);
	}

	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
}

static void netlink_sock_destruct_work(struct work_struct *work)
{
	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
						work);

	sk_free(&nlk->sk);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
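
/*
 * In short: writers that may sleep bracket table updates with
 * netlink_table_grab()/netlink_table_ungrab(), which waits until every
 * netlink_lock_table() reader has drained; readers only bump
 * nl_table_users under the rwlock, keeping the common lookup path cheap.
 */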

struct netlink_compare_arg
{
	possible_net_t pnet;
	u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))

static inline int netlink_compare(struct rhashtable_compare_arg *arg,
				  const void *ptr)
{
	const struct netlink_compare_arg *x = arg->key;
	const struct netlink_sock *nlk = ptr;

	return nlk->portid != x->portid ||
	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
				     struct net *net, u32 portid)
{
	memset(arg, 0, sizeof(*arg));
	write_pnet(&arg->pnet, net);
	arg->portid = portid;
}

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, net, portid);
	return rhashtable_lookup_fast(&table->hash, &arg,
				      netlink_rhashtable_params);
}

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
	return rhashtable_lookup_insert_key(&table->hash, &arg,
					    &nlk_sk(sk)->node,
					    netlink_rhashtable_params);
}

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
	struct netlink_table *table = &nl_table[protocol];
	struct sock *sk;

	rcu_read_lock();
	sk = __netlink_lookup(table, portid, net);
	if (sk)
		sock_hold(sk);
	rcu_read_unlock();

	return sk;
}

static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	unsigned long mask;
	unsigned int i;
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
}

static int netlink_insert(struct sock *sk, u32 portid)
{
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	int err;

	lock_sock(sk);

	err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
	if (nlk_sk(sk)->bound)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 &&
	    unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
		goto err;

	nlk_sk(sk)->portid = portid;
	sock_hold(sk);

	err = __netlink_insert(table, sk);
	if (err) {
		/* In case the hashtable backend returns with -EBUSY
		 * from here, it must not escape to the caller.
		 */
		if (unlikely(err == -EBUSY))
			err = -EOVERFLOW;
		if (err == -EEXIST)
			err = -EADDRINUSE;
		sock_put(sk);
		goto err;
	}

	/* We need to ensure that the socket is hashed and visible. */
	smp_wmb();
	nlk_sk(sk)->bound = portid;

err:
	release_sock(sk);
	return err;
}

static void netlink_remove(struct sock *sk)
{
542 543 544
	struct netlink_table *table;

	table = &nl_table[sk->sk_protocol];
	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
				    netlink_rhashtable_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}

L
552
	if (nlk_sk(sk)->subscriptions) {
L
554 555
		netlink_update_listeners(sk);
	}
556 557
	if (sk->sk_protocol == NETLINK_GENERIC)
		atomic_inc(&genl_sk_destructing_cnt);
L
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol,
			    int kern)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol, kern);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

static void deferred_put_nlk_sk(struct rcu_head *head)
{
	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
	struct sock *sk = &nlk->sk;

	if (!atomic_dec_and_test(&sk->sk_refcnt))
		return;

	if (nlk->cb_running && nlk->cb.done) {
		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
		schedule_work(&nlk->work);
		return;
	}

	sk_free(sk);
}

static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	/* must not acquire netlink_table_lock in any way again before unbind
	 * and notifying genetlink is done as otherwise it might deadlock
	 */
	if (nlk->netlink_unbind) {
		int i;

		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(sock_net(sk), i + 1);
	}
	if (sk->sk_protocol == NETLINK_GENERIC &&
	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
		wake_up(&genl_sk_destructing_waitq);

	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->portid && nlk->bound) {
		struct netlink_notify n = {
						.net = sock_net(sk),
						.protocol = sk->sk_protocol,
						.portid = nlk->portid,
					  };
		blocking_notifier_call_chain(&netlink_chain,
				NETLINK_URELEASE, &n);
	}

	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
		netlink_table_ungrab();
	}

	kfree(nlk->groups);
	nlk->groups = NULL;

	local_bh_disable();
	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	local_bh_enable();
	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
	return 0;
}
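
/* netlink_autobind() picks a local portid for an unbound socket: it tries
 * the current thread group id first and, if that portid is already taken,
 * retries with negative portids starting from a random point in
 * [S32_MIN, -4097].
 */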

static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);
	int err;
	s32 rover = -4096;
	bool ok;

retry:
	cond_resched();
	rcu_read_lock();
	ok = !__netlink_lookup(table, portid, net);
	rcu_read_unlock();
	if (!ok) {
		/* Bind collision, search negative portid values. */
		if (rover == -4096)
			/* rover will be in range [S32_MIN, -4097] */
			rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
		else if (rover >= -4096)
			rover = -4097;
		portid = rover--;
		goto retry;
	}

	err = netlink_insert(sk, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine.  */
	if (err == -EBUSY)
		err = 0;

	return err;
}

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in the user namespace @user_ns when the
 * netlink socket was created and the sender of the message has the
 * capability @cap in the user namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			struct user_namespace *user_ns, int cap)
{
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap in the user namespace @user_ns when the
 * netlink socket was created and the sender of the message has the
 * capability @cap in the user namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
}
EXPORT_SYMBOL(netlink_ns_capable);

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap in all user
 * namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, &init_user_ns, cap);
}
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap over the
 * network namespace of the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
}
EXPORT_SYMBOL(netlink_net_capable);
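
/*
 * Illustrative example (not from this file): a handler for a privileged
 * command would typically gate it with something like
 *
 *	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *		return -EPERM;
 *
 * so that both the opener of the sending socket and the sender itself
 * must hold the capability.
 */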

static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
}

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;
}

870
static int netlink_realloc_groups(struct sock *sk)
871 872 873
{
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
874
	unsigned long *new_groups;
875 876
	int err = 0;

877 878
	netlink_table_grab();

879
	groups = nl_table[sk->sk_protocol].groups;
880
	if (!nl_table[sk->sk_protocol].registered) {
881
		err = -ENOENT;
882 883
		goto out_unlock;
	}
884

885 886
	if (nlk->ngroups >= groups)
		goto out_unlock;
887

888 889 890 891 892
	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {
		err = -ENOMEM;
		goto out_unlock;
	}
893
	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
894 895 896
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
897
	nlk->ngroups = groups;
898 899 900
 out_unlock:
	netlink_table_ungrab();
	return err;
901 902
}

903
static void netlink_undo_bind(int group, long unsigned int groups,
904
			      struct sock *sk)
905
{
906
	struct netlink_sock *nlk = nlk_sk(sk);
907 908 909 910 911 912
	int undo;

	if (!nlk->netlink_unbind)
		return;

	for (undo = 0; undo < group; undo++)
913
		if (test_bit(undo, &groups))
914
			nlk->netlink_unbind(sock_net(sk), undo + 1);
915 916
}

917 918
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
L
Linus Torvalds 已提交
919 920
{
	struct sock *sk = sock->sk;
921
	struct net *net = sock_net(sk);
L
Linus Torvalds 已提交
922 923 924
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;
925
	long unsigned int groups = nladdr->nl_groups;
926
	bool bound;
927

928 929 930
	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

L
Linus Torvalds 已提交
931 932 933 934
	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen multicasts */
935
	if (groups) {
936
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
937
			return -EPERM;
938 939 940
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
941
	}
L
Linus Torvalds 已提交
942

943 944 945 946 947
	bound = nlk->bound;
	if (bound) {
		/* Ensure nlk->portid is up-to-date. */
		smp_rmb();

948
		if (nladdr->nl_pid != nlk->portid)
L
Linus Torvalds 已提交
949
			return -EINVAL;
950
	}
951 952 953 954 955 956 957

	if (nlk->netlink_bind && groups) {
		int group;

		for (group = 0; group < nlk->ngroups; group++) {
			if (!test_bit(group, &groups))
				continue;
958
			err = nlk->netlink_bind(net, group + 1);
959 960
			if (!err)
				continue;
961
			netlink_undo_bind(group, groups, sk);
962 963 964 965
			return err;
		}
	}

966 967 968 969
	/* No need for barriers here as we return to user-space without
	 * using any of the bound attributes.
	 */
	if (!bound) {
L
Linus Torvalds 已提交
970
		err = nladdr->nl_pid ?
971
			netlink_insert(sk, nladdr->nl_pid) :
L
Linus Torvalds 已提交
972
			netlink_autobind(sock);
973
		if (err) {
974
			netlink_undo_bind(nlk->ngroups, groups, sk);
L
Linus Torvalds 已提交
975
			return err;
976
		}
L
Linus Torvalds 已提交
977 978
	}

979
	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
L
Linus Torvalds 已提交
980 981 982
		return 0;

	netlink_table_grab();
983
	netlink_update_subscriptions(sk, nlk->subscriptions +
984
					 hweight32(groups) -
985
					 hweight32(nlk->groups[0]));
986
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
987
	netlink_update_listeners(sk);
L
Linus Torvalds 已提交
988 989 990 991 992 993 994 995 996 997 998
	netlink_table_ungrab();

	return 0;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
999
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
L
Linus Torvalds 已提交
1000

1001 1002 1003
	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

L
Linus Torvalds 已提交
1004 1005
	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
1006
		nlk->dst_portid	= 0;
1007
		nlk->dst_group  = 0;
L
Linus Torvalds 已提交
1008 1009 1010 1011 1012
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

1013
	if ((nladdr->nl_groups || nladdr->nl_pid) &&
1014
	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
L
Linus Torvalds 已提交
1015 1016
		return -EPERM;

1017 1018 1019 1020
	/* No need for barriers here as we return to user-space without
	 * using any of the bound attributes.
	 */
	if (!nlk->bound)
L
Linus Torvalds 已提交
1021 1022 1023 1024
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
1025
		nlk->dst_portid = nladdr->nl_pid;
1026
		nlk->dst_group  = ffs(nladdr->nl_groups);
L
Linus Torvalds 已提交
1027 1028 1029 1030 1031
	}

	return err;
}

1032 1033
static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
L
Linus Torvalds 已提交
1034 1035 1036
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
1037
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
1038

L
Linus Torvalds 已提交
1039 1040 1041 1042 1043
	nladdr->nl_family = AF_NETLINK;
	nladdr->nl_pad = 0;
	*addr_len = sizeof(*nladdr);

	if (peer) {
1044
		nladdr->nl_pid = nlk->dst_portid;
1045
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
L
Linus Torvalds 已提交
1046
	} else {
1047
		nladdr->nl_pid = nlk->portid;
1048
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
L
Linus Torvalds 已提交
1049 1050 1051 1052
	}
	return 0;
}

1053 1054 1055 1056 1057 1058 1059 1060
static int netlink_ioctl(struct socket *sock, unsigned int cmd,
			 unsigned long arg)
{
	/* try to hand this ioctl down to the NIC drivers.
	 */
	return -ENOIOCTLCMD;
}

1061
static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
L
Linus Torvalds 已提交
1062 1063 1064 1065
{
	struct sock *sock;
	struct netlink_sock *nlk;

1066
	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
L
Linus Torvalds 已提交
1067 1068 1069 1070 1071
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
1072
	if (sock->sk_state == NETLINK_CONNECTED &&
1073
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
L
Linus Torvalds 已提交
1074 1075 1076 1077 1078 1079 1080 1081
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}
	return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
A
Al Viro 已提交
1082
	struct inode *inode = file_inode(filp);
L
Linus Torvalds 已提交
1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095
	struct sock *sock;

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);

	sock_hold(sock);
	return sock;
}

1096 1097
static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
					       int broadcast)
1098 1099 1100 1101
{
	struct sk_buff *skb;
	void *data;

1102
	if (size <= NLMSG_GOODSIZE || broadcast)
1103 1104
		return alloc_skb(size, GFP_KERNEL);

1105 1106
	size = SKB_DATA_ALIGN(size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1107 1108 1109

	data = vmalloc(size);
	if (data == NULL)
1110
		return NULL;
1111

E
Eric Dumazet 已提交
1112
	skb = __build_skb(data, size);
1113 1114
	if (skb == NULL)
		vfree(data);
E
Eric Dumazet 已提交
1115
	else
1116
		skb->destructor = netlink_skb_destructor;
1117 1118 1119 1120

	return skb;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
L
Linus Torvalds 已提交
1133 1134 1135 1136 1137
{
	struct netlink_sock *nlk;

	nlk = nlk_sk(sk);

1138
	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1139
	     test_bit(NETLINK_S_CONGESTED, &nlk->state))) {
L
Linus Torvalds 已提交
1140
		DECLARE_WAITQUEUE(wait, current);
P
Patrick McHardy 已提交
1141
		if (!*timeo) {
1142
			if (!ssk || netlink_is_kernel(ssk))
L
Linus Torvalds 已提交
1143 1144 1145 1146 1147 1148 1149 1150 1151 1152
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1153
		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
L
Linus Torvalds 已提交
1154
		    !sock_flag(sk, SOCK_DEAD))
P
Patrick McHardy 已提交
1155
			*timeo = schedule_timeout(*timeo);
L
Linus Torvalds 已提交
1156 1157 1158 1159 1160 1161 1162

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
P
Patrick McHardy 已提交
1163
			return sock_intr_errno(*timeo);
L
Linus Torvalds 已提交
1164 1165 1166
		}
		return 1;
	}
1167
	netlink_skb_set_owner_r(skb, sk);
L
Linus Torvalds 已提交
1168 1169 1170
	return 0;
}

1171
static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
L
Linus Torvalds 已提交
1172 1173 1174
{
	int len = skb->len;

1175 1176
	netlink_deliver_tap(skb);

1177
	skb_queue_tail(&sk->sk_receive_queue, skb);
1178
	sk->sk_data_ready(sk);
1179 1180 1181 1182 1183 1184 1185
	return len;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	int len = __netlink_sendskb(sk, skb);

L
Linus Torvalds 已提交
1186 1187 1188 1189 1190 1191 1192 1193 1194 1195
	sock_put(sk);
	return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	sock_put(sk);
}

1196
static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
L
Linus Torvalds 已提交
1197 1198 1199
{
	int delta;

1200
	WARN_ON(skb->sk != NULL);
1201
	delta = skb->end - skb->tail;
1202
	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
L
Linus Torvalds 已提交
1203 1204 1205 1206 1207 1208
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);
		if (!nskb)
			return skb;
1209
		consume_skb(skb);
L
Linus Torvalds 已提交
1210 1211 1212
		skb = nskb;
	}

1213 1214 1215
	if (!pskb_expand_head(skb, 0, -delta,
			      (allocation & ~__GFP_DIRECT_RECLAIM) |
			      __GFP_NOWARN | __GFP_NORETRY))
L
Linus Torvalds 已提交
1216 1217 1218 1219 1220
		skb->truesize -= delta;

	return skb;
}

1221 1222
static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
1223 1224 1225 1226 1227 1228 1229
{
	int ret;
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		ret = skb->len;
1230
		netlink_skb_set_owner_r(skb, sk);
1231
		NETLINK_CB(skb).sk = ssk;
1232
		netlink_deliver_tap_kernel(sk, ssk, skb);
1233
		nlk->netlink_rcv(skb);
1234 1235 1236
		consume_skb(skb);
	} else {
		kfree_skb(skb);
1237 1238 1239 1240 1241 1242
	}
	sock_put(sk);
	return ret;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
1243
		    u32 portid, int nonblock)
L
Linus Torvalds 已提交
1244 1245 1246 1247 1248 1249 1250 1251 1252
{
	struct sock *sk;
	int err;
	long timeo;

	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);
retry:
1253
	sk = netlink_getsockbyportid(ssk, portid);
L
Linus Torvalds 已提交
1254 1255 1256 1257
	if (IS_ERR(sk)) {
		kfree_skb(skb);
		return PTR_ERR(sk);
	}
1258
	if (netlink_is_kernel(sk))
1259
		return netlink_unicast_kernel(sk, skb, ssk);
1260

1261
	if (sk_filter(sk, skb)) {
W
Wang Chen 已提交
1262
		err = skb->len;
1263 1264 1265 1266 1267
		kfree_skb(skb);
		sock_put(sk);
		return err;
	}

1268
	err = netlink_attachskb(sk, skb, &timeo, ssk);
L
Linus Torvalds 已提交
1269 1270 1271 1272 1273
	if (err == 1)
		goto retry;
	if (err)
		return err;

	return netlink_sendskb(sk, skb);
}
EXPORT_SYMBOL(netlink_unicast);
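
/*
 * Illustrative example (not from this file): a kernel-side responder
 * usually builds a reply with nlmsg_new()/nlmsg_put() and sends it with
 *
 *	netlink_unicast(sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
 *
 * netlink_unicast() consumes the skb on every path, so the caller must
 * not free it afterwards, whether delivery succeeded or failed.
 */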

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
	int res = 0;
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	rcu_read_lock();
	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL_GPL(netlink_has_listeners);
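
/*
 * Typical use (illustrative): event producers call
 * netlink_has_listeners(sk, group) first and skip building an expensive
 * notification skb when nobody is subscribed to the multicast group.
 */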

static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
L
Linus Torvalds 已提交
1298 1299 1300 1301
{
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
1302
	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
1303
		netlink_skb_set_owner_r(skb, sk);
1304
		__netlink_sendskb(sk, skb);
1305
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
L
Linus Torvalds 已提交
1306 1307 1308 1309 1310 1311
	}
	return -1;
}

struct netlink_broadcast_data {
	struct sock *exclude_sk;
1312
	struct net *net;
1313
	u32 portid;
L
Linus Torvalds 已提交
1314 1315
	u32 group;
	int failure;
1316
	int delivery_failure;
L
Linus Torvalds 已提交
1317 1318
	int congested;
	int delivered;
A
Al Viro 已提交
1319
	gfp_t allocation;
L
Linus Torvalds 已提交
1320
	struct sk_buff *skb, *skb2;
1321 1322
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
	void *tx_data;
L
Linus Torvalds 已提交
1323 1324
};

1325 1326
static void do_one_broadcast(struct sock *sk,
				    struct netlink_broadcast_data *p)
L
Linus Torvalds 已提交
1327 1328 1329 1330 1331
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int val;

	if (p->exclude_sk == sk)
1332
		return;
L
Linus Torvalds 已提交
1333

1334
	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1335
	    !test_bit(p->group - 1, nlk->groups))
1336
		return;
L
Linus Torvalds 已提交
1337

1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348
	if (!net_eq(sock_net(sk), p->net)) {
		if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
			return;

		if (!peernet_has_id(sock_net(sk), p->net))
			return;

		if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
				     CAP_NET_BROADCAST))
			return;
	}
1349

L
Linus Torvalds 已提交
1350 1351
	if (p->failure) {
		netlink_overrun(sk);
1352
		return;
L
Linus Torvalds 已提交
1353 1354 1355 1356
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
1357
		if (skb_shared(p->skb)) {
L
Linus Torvalds 已提交
1358 1359
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
1360 1361 1362 1363 1364 1365
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
L
Linus Torvalds 已提交
1366 1367 1368 1369 1370 1371
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
1372
		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
1373
			p->delivery_failure = 1;
1374 1375 1376
		goto out;
	}
	if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1377 1378
		kfree_skb(p->skb2);
		p->skb2 = NULL;
1379 1380 1381
		goto out;
	}
	if (sk_filter(sk, p->skb2)) {
1382 1383
		kfree_skb(p->skb2);
		p->skb2 = NULL;
1384 1385 1386 1387 1388 1389
		goto out;
	}
	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
	NETLINK_CB(p->skb2).nsid_is_set = true;
	val = netlink_broadcast_deliver(sk, p->skb2);
	if (val < 0) {
L
Linus Torvalds 已提交
1390
		netlink_overrun(sk);
1391
		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
1392
			p->delivery_failure = 1;
L
Linus Torvalds 已提交
1393 1394 1395 1396 1397
	} else {
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
1398
out:
L
Linus Torvalds 已提交
1399 1400 1401
	sock_put(sk);
}

1402
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
1403 1404 1405
	u32 group, gfp_t allocation,
	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	void *filter_data)
L
Linus Torvalds 已提交
1406
{
1407
	struct net *net = sock_net(ssk);
L
Linus Torvalds 已提交
1408 1409 1410 1411 1412 1413
	struct netlink_broadcast_data info;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
1414
	info.net = net;
1415
	info.portid = portid;
L
Linus Torvalds 已提交
1416 1417
	info.group = group;
	info.failure = 0;
1418
	info.delivery_failure = 0;
L
Linus Torvalds 已提交
1419 1420 1421 1422 1423
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;
1424 1425
	info.tx_filter = filter;
	info.tx_data = filter_data;
L
Linus Torvalds 已提交
1426 1427 1428 1429 1430

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

1431
	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
L
Linus Torvalds 已提交
1432 1433
		do_one_broadcast(sk, &info);

1434
	consume_skb(skb);
1435

L
Linus Torvalds 已提交
1436 1437
	netlink_unlock_table();

1438 1439
	if (info.delivery_failure) {
		kfree_skb(info.skb2);
1440
		return -ENOBUFS;
E
Eric Dumazet 已提交
1441 1442
	}
	consume_skb(info.skb2);
1443

L
Linus Torvalds 已提交
1444
	if (info.delivered) {
1445
		if (info.congested && gfpflags_allow_blocking(allocation))
L
Linus Torvalds 已提交
1446 1447 1448 1449 1450
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
{
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
		NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);
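
/*
 * Note (illustrative): netlink_broadcast() consumes the passed skb (see
 * consume_skb() in netlink_broadcast_filtered() above), so a caller that
 * also wants to unicast the same message, as netlink_sendmsg() below does,
 * must take an extra reference on the skb before broadcasting.
 */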

struct netlink_set_err_data {
	struct sock *exclude_sk;
1463
	u32 portid;
L
Linus Torvalds 已提交
1464 1465 1466 1467
	u32 group;
	int code;
};

1468
static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
L
Linus Torvalds 已提交
1469 1470
{
	struct netlink_sock *nlk = nlk_sk(sk);
1471
	int ret = 0;
L
Linus Torvalds 已提交
1472 1473 1474 1475

	if (sk == p->exclude_sk)
		goto out;

O
Octavian Purdila 已提交
1476
	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
1477 1478
		goto out;

1479
	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
1480
	    !test_bit(p->group - 1, nlk->groups))
L
Linus Torvalds 已提交
1481 1482
		goto out;

1483
	if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {
1484 1485 1486 1487
		ret = 1;
		goto out;
	}

L
Linus Torvalds 已提交
1488 1489 1490
	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
1491
	return ret;
L
Linus Torvalds 已提交
1492 1493
}

1494 1495 1496
/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
1497
 * @portid: the PORTID of a process that we want to skip (if any)
1498
 * @group: the broadcast group that will notice the error
1499
 * @code: error code, must be negative (as usual in kernelspace)
1500 1501
 *
 * This function returns the number of broadcast listeners that have set the
1502
 * NETLINK_NO_ENOBUFS socket option.
1503
 */
1504
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
L
Linus Torvalds 已提交
1505 1506 1507
{
	struct netlink_set_err_data info;
	struct sock *sk;
1508
	int ret = 0;
L
Linus Torvalds 已提交
1509 1510

	info.exclude_sk = ssk;
1511
	info.portid = portid;
L
Linus Torvalds 已提交
1512
	info.group = group;
1513 1514
	/* sk->sk_err wants a positive error value */
	info.code = -code;
L
Linus Torvalds 已提交
1515 1516 1517

	read_lock(&nl_table_lock);

1518
	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1519
		ret += do_one_set_err(sk, &info);
L
Linus Torvalds 已提交
1520 1521

	read_unlock(&nl_table_lock);
1522
	return ret;
L
Linus Torvalds 已提交
1523
}
1524
EXPORT_SYMBOL(netlink_set_err);
L
Linus Torvalds 已提交
1525

1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}

1543
static int netlink_setsockopt(struct socket *sock, int level, int optname,
1544
			      char __user *optval, unsigned int optlen)
1545 1546 1547
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
1548 1549
	unsigned int val = 0;
	int err;
1550 1551 1552 1553

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

1554
	if (optlen >= sizeof(int) &&
1555
	    get_user(val, (unsigned int __user *)optval))
1556 1557 1558 1559 1560
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
1561
			nlk->flags |= NETLINK_F_RECV_PKTINFO;
1562
		else
1563
			nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
1564 1565 1566 1567
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
1568
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
1569
			return -EPERM;
1570 1571 1572
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
1573 1574
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
1575
		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
1576
			err = nlk->netlink_bind(sock_net(sk), val);
1577 1578 1579
			if (err)
				return err;
		}
1580
		netlink_table_grab();
1581 1582
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
1583
		netlink_table_ungrab();
1584
		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
1585
			nlk->netlink_unbind(sock_net(sk), val);
1586

1587 1588 1589
		err = 0;
		break;
	}
1590 1591
	case NETLINK_BROADCAST_ERROR:
		if (val)
1592
			nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
1593
		else
1594
			nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
1595 1596
		err = 0;
		break;
1597 1598
	case NETLINK_NO_ENOBUFS:
		if (val) {
1599 1600
			nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
1601
			wake_up_interruptible(&nlk->wait);
E
Eric Dumazet 已提交
1602
		} else {
1603
			nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
E
Eric Dumazet 已提交
1604
		}
1605 1606
		err = 0;
		break;
1607 1608 1609 1610 1611 1612 1613 1614 1615 1616
	case NETLINK_LISTEN_ALL_NSID:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
			return -EPERM;

		if (val)
			nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
		else
			nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
		err = 0;
		break;
1617 1618 1619 1620 1621 1622 1623
	case NETLINK_CAP_ACK:
		if (val)
			nlk->flags |= NETLINK_F_CAP_ACK;
		else
			nlk->flags &= ~NETLINK_F_CAP_ACK;
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}
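
/*
 * Illustrative example (user space, not part of this file): joining a
 * multicast group is done with
 *
 *	int grp = RTNLGRP_LINK;
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));
 *
 * which lands in the NETLINK_ADD_MEMBERSHIP case above and, for protocols
 * that provide one, also calls the per-protocol netlink_bind() callback.
 */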

static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
1650
		val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
H
Heiko Carstens 已提交
1651 1652 1653
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
1654 1655
		err = 0;
		break;
1656 1657 1658 1659
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
1660
		val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
1661 1662 1663 1664 1665
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
1666 1667 1668 1669
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
1670
		val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
1671 1672 1673 1674 1675
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
1676 1677 1678 1679
	case NETLINK_LIST_MEMBERSHIPS: {
		int pos, idx, shift;

		err = 0;
1680
		netlink_lock_table();
1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694
		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
			if (len - pos < sizeof(u32))
				break;

			idx = pos / sizeof(unsigned long);
			shift = (pos % sizeof(unsigned long)) * 8;
			if (put_user((u32)(nlk->groups[idx] >> shift),
				     (u32 __user *)(optval + pos))) {
				err = -EFAULT;
				break;
			}
		}
		if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
			err = -EFAULT;
1695
		netlink_unlock_table();
1696 1697
		break;
	}
1698 1699 1700 1701 1702 1703 1704 1705 1706 1707
	case NETLINK_CAP_ACK:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
}

1722 1723 1724 1725 1726 1727 1728 1729 1730 1731
static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
					 struct sk_buff *skb)
{
	if (!NETLINK_CB(skb).nsid_is_set)
		return;

	put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
		 &NETLINK_CB(skb).nsid);
}

1732
static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
L
Linus Torvalds 已提交
1733 1734 1735
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
1736
	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
1737
	u32 dst_portid;
1738
	u32 dst_group;
L
Linus Torvalds 已提交
1739 1740 1741
	struct sk_buff *skb;
	int err;
	struct scm_cookie scm;
1742
	u32 netlink_skb_flags = 0;
L
Linus Torvalds 已提交
1743 1744 1745 1746

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

C
Christoph Hellwig 已提交
1747
	err = scm_send(sock, msg, &scm, true);
L
Linus Torvalds 已提交
1748 1749 1750 1751
	if (err < 0)
		return err;

	if (msg->msg_namelen) {
1752
		err = -EINVAL;
L
Linus Torvalds 已提交
1753
		if (addr->nl_family != AF_NETLINK)
1754
			goto out;
1755
		dst_portid = addr->nl_pid;
1756
		dst_group = ffs(addr->nl_groups);
1757
		err =  -EPERM;
1758
		if ((dst_group || dst_portid) &&
1759
		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1760
			goto out;
1761
		netlink_skb_flags |= NETLINK_SKB_DST;
L
Linus Torvalds 已提交
1762
	} else {
1763
		dst_portid = nlk->dst_portid;
1764
		dst_group = nlk->dst_group;
L
Linus Torvalds 已提交
1765 1766
	}

1767
	if (!nlk->bound) {
L
Linus Torvalds 已提交
1768 1769 1770
		err = netlink_autobind(sock);
		if (err)
			goto out;
1771 1772 1773
	} else {
		/* Ensure nlk is hashed and visible. */
		smp_rmb();
L
Linus Torvalds 已提交
1774 1775 1776 1777 1778 1779
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;
	err = -ENOBUFS;
1780
	skb = netlink_alloc_large_skb(len, dst_group);
1781
	if (skb == NULL)
L
Linus Torvalds 已提交
1782 1783
		goto out;

1784
	NETLINK_CB(skb).portid	= nlk->portid;
1785
	NETLINK_CB(skb).dst_group = dst_group;
C
Christoph Hellwig 已提交
1786
	NETLINK_CB(skb).creds	= scm.creds;
1787
	NETLINK_CB(skb).flags	= netlink_skb_flags;
L
Linus Torvalds 已提交
1788 1789

	err = -EFAULT;
A
Al Viro 已提交
1790
	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
L
Linus Torvalds 已提交
1791 1792 1793 1794 1795 1796 1797 1798 1799 1800
		kfree_skb(skb);
		goto out;
	}

	err = security_netlink_send(sk, skb);
	if (err) {
		kfree_skb(skb);
		goto out;
	}

1801
	if (dst_group) {
L
Linus Torvalds 已提交
1802
		atomic_inc(&skb->users);
1803
		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
L
Linus Torvalds 已提交
1804
	}
1805
	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
L
Linus Torvalds 已提交
1806 1807

out:
C
Christoph Hellwig 已提交
1808
	scm_destroy(&scm);
L
Linus Torvalds 已提交
1809 1810 1811
	return err;
}

static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			   int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags & MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb, *data_skb;
	int err, ret;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	data_skb = skb;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, it means that we have to use
		 * the frag_list skb's data for compat tasks and the regular
		 * skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;
	}
#endif

	/* Record the max length of recvmsg() calls for future allocations */
	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
				     SKB_WITH_OVERHEAD(32768));

	copied = data_skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(data_skb);
	err = skb_copy_datagram_msg(data_skb, 0, msg, copied);

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
		addr->nl_family = AF_NETLINK;
		addr->nl_pad    = 0;
		addr->nl_pid	= NETLINK_CB(skb).portid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);
	if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
		netlink_cmsg_listen_all_nsid(sk, msg, skb);

	memset(&scm, 0, sizeof(scm));
	scm.creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;

	skb_free_datagram(sk, skb);

	if (nlk->cb_running &&
	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
		if (ret) {
			sk->sk_err = -ret;
			sk->sk_error_report(sk);
		}
	}

	scm_recv(sock, msg, &scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}

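/* Kernel-side netlink sockets never use the generic data-ready path: skbs are
 * handed to the protocol's netlink_rcv() callback directly, so reaching this
 * function indicates a bug.
 */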
static void netlink_data_ready(struct sock *sk)
{
	BUG();
}

/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

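/* Typical usage (illustrative only, not taken from this file): kernel users
 * normally call the netlink_kernel_create() wrapper, which passes THIS_MODULE
 * to __netlink_kernel_create(). The protocol number and input callback named
 * below are hypothetical; cfg.input is invoked for every skb sent to the
 * kernel socket.
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.groups	= 32,
 *		.input	= my_nl_input,
 *	};
 *	struct sock *nlsk = netlink_kernel_create(&init_net, MY_NETLINK_PROTO, &cfg);
 *	...
 *	netlink_kernel_release(nlsk);
 */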
struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;

	if (!cfg || cfg->groups < 32)
		groups = 32;
	else
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_F_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		if (cfg) {
			nl_table[unit].bind = cfg->bind;
			nl_table[unit].unbind = cfg->unbind;
			nl_table[unit].flags = cfg->flags;
			if (cfg->compare)
				nl_table[unit].compare = cfg->compare;
		}
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(__netlink_kernel_create);

void
netlink_kernel_release(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_release(sk->sk_socket);
}
EXPORT_SYMBOL(netlink_kernel_release);

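/* Grow a kernel socket's listeners bitmap so that it can hold at least
 * 'groups' multicast groups (never fewer than 32). The old bitmap is swapped
 * out under the table lock and freed via RCU, keeping lockless readers such
 * as netlink_has_listeners() safe.
 */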
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (groups < 32)
		groups = 32;

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		old = nl_deref_protected(tbl->listeners);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	}
	tbl->groups = groups;

	return 0;
}

/**
 * netlink_change_ngroups - change number of multicast groups
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}

void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}

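/* Reserve and initialise a netlink message header in 'skb'; the caller's
 * payload of 'len' bytes follows the header and any alignment padding at the
 * tail is zeroed so no uninitialised kernel memory can leak to user space.
 * Most callers go through the nlmsg_put() wrapper, e.g. (illustrative only):
 *
 *	nlh = nlmsg_put(skb, portid, seq, RTM_NEWLINK, sizeof(struct ifinfomsg), 0);
 */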
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{
	struct nlmsghdr *nlh;
	int size = nlmsg_msg_size(len);

	nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
	nlh->nlmsg_type = type;
	nlh->nlmsg_len = size;
	nlh->nlmsg_flags = flags;
	nlh->nlmsg_pid = portid;
	nlh->nlmsg_seq = seq;
	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
	return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);

/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */

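/* Emit one buffer's worth of an in-progress dump: allocate an skb sized from
 * the largest recvmsg() seen so far (at least min_dump_alloc), let cb->dump()
 * pack as many messages into it as fit, and queue the result on the socket.
 * Once cb->dump() returns 0, an NLMSG_DONE message carrying the final return
 * value is queued and the callback state is torn down.
 */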
static int netlink_dump(struct sock *sk)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_callback *cb;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	struct module *module;
	int len, err = -ENOBUFS;
	int alloc_min_size;
	int alloc_size;

	mutex_lock(nlk->cb_mutex);
	if (!nlk->cb_running) {
		err = -EINVAL;
		goto errout_skb;
	}

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto errout_skb;

	/* NLMSG_GOODSIZE is small to avoid high order allocations being
	 * required, but it makes sense to _attempt_ a 16K bytes allocation
	 * to reduce number of system calls on dump operations, if user
	 * ever provided a big enough buffer.
	 */
	cb = &nlk->cb;
	alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

	if (alloc_min_size < nlk->max_recvmsg_len) {
		alloc_size = nlk->max_recvmsg_len;
		skb = alloc_skb(alloc_size,
				(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
				__GFP_NOWARN | __GFP_NORETRY);
	}
	if (!skb) {
		alloc_size = alloc_min_size;
		skb = alloc_skb(alloc_size, GFP_KERNEL);
	}
	if (!skb)
		goto errout_skb;

	/* Trim skb to allocated size. User is expected to provide buffer as
	 * large as max(min_dump_alloc, 16KiB (max_recvmsg_len capped at
	 * netlink_recvmsg())). dump will pack as many smaller messages as
	 * could fit within the allocated skb. skb is typically allocated
	 * with larger space than required (could be as much as near 2x the
	 * requested size with align to next power of 2 approach). Allowing
	 * dump to use the excess space makes it difficult for a user to have a
	 * reasonable static buffer based on the expected largest dump of a
	 * single netdev. The outcome is MSG_TRUNC error.
	 */
	skb_reserve(skb, skb_tailroom(skb) - alloc_size);
	netlink_skb_set_owner_r(skb, sk);

	len = cb->dump(skb, cb);

	if (len > 0) {
		mutex_unlock(nlk->cb_mutex);

		if (sk_filter(sk, skb))
			kfree_skb(skb);
		else
			__netlink_sendskb(sk, skb);
		return 0;
	}

	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
	if (!nlh)
		goto errout_skb;

	nl_dump_check_consistent(cb, nlh);

	memcpy(nlmsg_data(nlh), &len, sizeof(len));

	if (sk_filter(sk, skb))
		kfree_skb(skb);
	else
		__netlink_sendskb(sk, skb);

	if (cb->done)
		cb->done(cb);

	nlk->cb_running = false;
	module = cb->module;
	skb = cb->skb;
	mutex_unlock(nlk->cb_mutex);
	module_put(module);
	consume_skb(skb);
	return 0;

errout_skb:
	mutex_unlock(nlk->cb_mutex);
	kfree_skb(skb);
	return err;
}

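/* Begin a dump for the request in 'skb': record the dump callback on the
 * destination socket, run the optional ->start() hook and deliver the first
 * buffer via netlink_dump(). Further buffers are produced from
 * netlink_recvmsg() as the reader drains its queue. Returns -EINTR on
 * success so that the caller knows not to ACK the request itself.
 */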
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
{
	struct netlink_callback *cb;
	struct sock *sk;
	struct netlink_sock *nlk;
	int ret;

	atomic_inc(&skb->users);

	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
	if (sk == NULL) {
		ret = -ECONNREFUSED;
		goto error_free;
	}

	nlk = nlk_sk(sk);
	mutex_lock(nlk->cb_mutex);
	/* A dump is in progress... */
	if (nlk->cb_running) {
		ret = -EBUSY;
		goto error_unlock;
	}
	/* Take a reference on the module that cb->dump belongs to */
	if (!try_module_get(control->module)) {
		ret = -EPROTONOSUPPORT;
		goto error_unlock;
	}

	cb = &nlk->cb;
	memset(cb, 0, sizeof(*cb));
	cb->start = control->start;
	cb->dump = control->dump;
	cb->done = control->done;
	cb->nlh = nlh;
	cb->data = control->data;
	cb->module = control->module;
	cb->min_dump_alloc = control->min_dump_alloc;
	cb->skb = skb;

	nlk->cb_running = true;

	mutex_unlock(nlk->cb_mutex);

	if (cb->start)
		cb->start(cb);

	ret = netlink_dump(sk);
	sock_put(sk);

	if (ret)
		return ret;

	/* We successfully started a dump; by returning -EINTR we
	 * signal not to send an ACK even if one was requested.
	 */
	return -EINTR;

error_unlock:
	sock_put(sk);
	mutex_unlock(nlk->cb_mutex);
error_free:
	kfree_skb(skb);
	return ret;
}
EXPORT_SYMBOL(__netlink_dump_start);

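/* Send an NLMSG_ERROR reply for 'nlh' back to its sender. err == 0 acts as a
 * positive acknowledgement; a non-zero err echoes the offending request in
 * the reply unless the sender capped acks with the NETLINK_CAP_ACK option.
 */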
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
	struct sk_buff *skb;
	struct nlmsghdr *rep;
	struct nlmsgerr *errmsg;
	size_t payload = sizeof(*errmsg);
	struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);

	/* Error messages get the original request appended, unless the user
	 * requests to cap the error message.
	 */
	if (!(nlk->flags & NETLINK_F_CAP_ACK) && err)
		payload += nlmsg_len(nlh);

	skb = nlmsg_new(payload, GFP_KERNEL);
	if (!skb) {
		struct sock *sk;

		sk = netlink_lookup(sock_net(in_skb->sk),
				    in_skb->sk->sk_protocol,
				    NETLINK_CB(in_skb).portid);
		if (sk) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
			sock_put(sk);
		}
		return;
	}

	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
			  NLMSG_ERROR, payload, 0);
	errmsg = nlmsg_data(rep);
	errmsg->error = err;
	memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);

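/* Walk every netlink message in 'skb' and hand each request to 'cb', sending
 * an ACK or an error reply where one is needed. A protocol's input callback
 * typically dispatches through this helper; a minimal, purely illustrative
 * sketch (my_nl_input and my_nl_rcv_msg are hypothetical, and my_nl_rcv_msg
 * returns 0 or a negative errno after decoding nlh):
 *
 *	static int my_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_nl_input(struct sk_buff *skb)
 *	{
 *		netlink_rcv_skb(skb, my_nl_rcv_msg);
 *	}
 */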
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						     struct nlmsghdr *))
{
	struct nlmsghdr *nlh;
	int err;

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
			return 0;

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
			goto ack;

		/* Skip control messages */
		if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
			goto ack;

		err = cb(skb, nlh);
		if (err == -EINTR)
			goto skip;

ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err)
			netlink_ack(skb, nlh, err);

skip:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}

	return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);

/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
{
	int err = 0;

	if (group) {
		int exclude_portid = 0;

		if (report) {
			atomic_inc(&skb->users);
			exclude_portid = portid;
		}

		/* Errors are reported via the destination's sk->sk_err, but
		 * delivery errors are propagated only if the
		 * NETLINK_BROADCAST_ERROR flag is set.
		 */
		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
	}

	if (report) {
		int err2;

		err2 = nlmsg_unicast(sk, skb, portid);
		if (!err || err == -ESRCH)
			err = err2;
	}

	return err;
}
EXPORT_SYMBOL(nlmsg_notify);

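/* /proc/net/netlink: iterate over every bound socket of every netlink
 * protocol using the per-table rhashtable walkers, one table (iter->link) at
 * a time, and print one line of state per socket.
 */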
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
	struct seq_net_private p;
	struct rhashtable_iter hti;
	int link;
};

static int netlink_walk_start(struct nl_seq_iter *iter)
{
	int err;

	err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti,
				   GFP_KERNEL);
	if (err) {
		iter->link = MAX_LINKS;
		return err;
	}

	err = rhashtable_walk_start(&iter->hti);
	return err == -EAGAIN ? 0 : err;
}

static void netlink_walk_stop(struct nl_seq_iter *iter)
{
	rhashtable_walk_stop(&iter->hti);
	rhashtable_walk_exit(&iter->hti);
}

static void *__netlink_seq_next(struct seq_file *seq)
{
	struct nl_seq_iter *iter = seq->private;
	struct netlink_sock *nlk;

	do {
		for (;;) {
			int err;

			nlk = rhashtable_walk_next(&iter->hti);

			if (IS_ERR(nlk)) {
				if (PTR_ERR(nlk) == -EAGAIN)
					continue;

				return nlk;
			}

			if (nlk)
				break;

			netlink_walk_stop(iter);
			if (++iter->link >= MAX_LINKS)
				return NULL;

			err = netlink_walk_start(iter);
			if (err)
				return ERR_PTR(err);
		}
	} while (sock_net(&nlk->sk) != seq_file_net(seq));

	return nlk;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
{
	struct nl_seq_iter *iter = seq->private;
	void *obj = SEQ_START_TOKEN;
	loff_t pos;
	int err;

	iter->link = 0;

	err = netlink_walk_start(iter);
	if (err)
		return ERR_PTR(err);

	for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
		obj = __netlink_seq_next(seq);

	return obj;
}

static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return __netlink_seq_next(seq);
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	struct nl_seq_iter *iter = seq->private;

	if (iter->link >= MAX_LINKS)
		return;

	netlink_walk_stop(iter);
}


static int netlink_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "sk       Eth Pid    Groups   "
			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
	} else {
		struct sock *s = v;
		struct netlink_sock *nlk = nlk_sk(s);

		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
			   s,
			   s->sk_protocol,
			   nlk->portid,
			   nlk->groups ? (u32)nlk->groups[0] : 0,
			   sk_rmem_alloc_get(s),
			   sk_wmem_alloc_get(s),
			   nlk->cb_running,
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_drops),
			   sock_i_ino(s)
			);

	}
	return 0;
}

static const struct seq_operations netlink_seq_ops = {
	.start  = netlink_seq_start,
	.next   = netlink_seq_next,
	.stop   = netlink_seq_stop,
	.show   = netlink_seq_show,
};


static int netlink_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &netlink_seq_ops,
				sizeof(struct nl_seq_iter));
}

static const struct file_operations netlink_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= netlink_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

int netlink_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);

static const struct proto_ops netlink_ops = {
	.family =	PF_NETLINK,
	.owner =	THIS_MODULE,
	.release =	netlink_release,
	.bind =		netlink_bind,
	.connect =	netlink_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	netlink_getname,
	.poll =		datagram_poll,
	.ioctl =	netlink_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	netlink_setsockopt,
	.getsockopt =	netlink_getsockopt,
	.sendmsg =	netlink_sendmsg,
	.recvmsg =	netlink_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family netlink_family_ops = {
	.family = PF_NETLINK,
	.create = netlink_create,
	.owner	= THIS_MODULE,	/* for consistency 8) */
};

static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
		return -ENOMEM;
#endif
	return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("netlink", net->proc_net);
#endif
}

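/* Pre-register the NETLINK_USERSOCK table at boot: no kernel socket is ever
 * created for this protocol, but user-space sockets can still talk to each
 * other, and NL_CFG_F_NONROOT_SEND lets unprivileged processes send to other
 * ports.
 */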
static void __init netlink_add_usersock_entry(void)
{
	struct listeners *listeners;
	int groups = 32;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

	netlink_table_grab();

	nl_table[NETLINK_USERSOCK].groups = groups;
	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
	nl_table[NETLINK_USERSOCK].registered = 1;
	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;

	netlink_table_ungrab();
}

static struct pernet_operations __net_initdata netlink_net_ops = {
	.init = netlink_net_init,
	.exit = netlink_net_exit,
};

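/* Sockets are keyed in the per-protocol rhashtable by the (net, portid) pair;
 * netlink_hash() mixes that pair with the per-table seed via jhash2() for the
 * lookup fast path.
 */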
static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
{
	const struct netlink_sock *nlk = data;
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
	return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
}

static const struct rhashtable_params netlink_rhashtable_params = {
	.head_offset = offsetof(struct netlink_sock, node),
	.key_len = netlink_compare_arg_len,
	.obj_hashfn = netlink_hash,
	.obj_cmpfn = netlink_compare,
	.automatic_shrinking = true,
};

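/* Boot-time initialisation: register the netlink proto, allocate nl_table
 * with one rhashtable per protocol slot, add the NETLINK_USERSOCK entry,
 * register the PF_NETLINK socket family and the per-netns /proc hooks, and
 * finally bring up rtnetlink, which may be needed early.
 */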
static int __init netlink_proto_init(void)
{
	int i;
	int err = proto_register(&netlink_proto, 0);

	if (err != 0)
		goto out;

	BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));

	nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
	if (!nl_table)
		goto panic;

	for (i = 0; i < MAX_LINKS; i++) {
		if (rhashtable_init(&nl_table[i].hash,
				    &netlink_rhashtable_params) < 0) {
			while (--i > 0)
				rhashtable_destroy(&nl_table[i].hash);
			kfree(nl_table);
			goto panic;
		}
	}

	INIT_LIST_HEAD(&netlink_tap_all);

	netlink_add_usersock_entry();

	sock_register(&netlink_family_ops);
	register_pernet_subsys(&netlink_net_ops);
	/* The netlink device handler may be needed early. */
	rtnetlink_init();
out:
	return err;
panic:
	panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);