udp.c 74.8 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
8
 * Authors:	Ross Biro
L
Linus Torvalds 已提交
9 10
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
11
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
L
Linus Torvalds 已提交
12 13 14 15 16 17 18 19 20
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	: 	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	: 	Correct error for oversized datagrams
21 22
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
L
Linus Torvalds 已提交
23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	: 	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *	Arnt Gulbrandsen 	:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *	Willy Konynenberg	:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
57
 *					for connect.
L
Linus Torvalds 已提交
58 59 60 61 62 63 64 65 66 67 68 69 70
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *	Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *	Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo :	convert /proc/net/udp to seq_file
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Derek Atkins <derek@ihtfp.com>: Add Encapulation Support
71
 *	James Chapman		:	Add L2TP encapsulation type.
L
Linus Torvalds 已提交
72 73 74 75 76 77 78
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
79

80 81
#define pr_fmt(fmt) "UDP: " fmt

82
#include <linux/uaccess.h>
L
Linus Torvalds 已提交
83
#include <asm/ioctls.h>
H
Hideo Aoki 已提交
84
#include <linux/bootmem.h>
85 86
#include <linux/highmem.h>
#include <linux/swap.h>
L
Linus Torvalds 已提交
87 88 89 90 91
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
92
#include <linux/igmp.h>
93
#include <linux/inetdevice.h>
L
Linus Torvalds 已提交
94 95 96 97 98 99
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
100
#include <linux/slab.h>
101
#include <net/tcp_states.h>
L
Linus Torvalds 已提交
102 103 104
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
105
#include <net/net_namespace.h>
L
Linus Torvalds 已提交
106
#include <net/icmp.h>
S
Shawn Bohrer 已提交
107
#include <net/inet_hashtables.h>
L
Linus Torvalds 已提交
108 109 110
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
111
#include <trace/events/udp.h>
112
#include <linux/static_key.h>
113
#include <trace/events/skb.h>
114
#include <net/busy_poll.h>
115
#include "udp_impl.h"
116
#include <net/sock_reuseport.h>
E
Eric Dumazet 已提交
117
#include <net/addrconf.h>
L
Linus Torvalds 已提交
118

119
struct udp_table udp_table __read_mostly;
120
EXPORT_SYMBOL(udp_table);
L
Linus Torvalds 已提交
121

E
Eric Dumazet 已提交
122
long sysctl_udp_mem[3] __read_mostly;
H
Hideo Aoki 已提交
123
EXPORT_SYMBOL(sysctl_udp_mem);
E
Eric Dumazet 已提交
124

E
Eric Dumazet 已提交
125
atomic_long_t udp_memory_allocated;
H
Hideo Aoki 已提交
126 127
EXPORT_SYMBOL(udp_memory_allocated);

128 129
#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
130

131 132 133 134 135 136 137 138 139 140 141
/* IPCB reference means this can not be used from early demux */
static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_udp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return true;
#endif
	return false;
}

142
static int udp_lib_lport_inuse(struct net *net, __u16 num,
143
			       const struct udp_hslot *hslot,
144
			       unsigned long *bitmap,
145
			       struct sock *sk, unsigned int log)
L
Linus Torvalds 已提交
146
{
147
	struct sock *sk2;
148
	kuid_t uid = sock_i_uid(sk);
149

150
	sk_for_each(sk2, &hslot->head) {
151 152
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
153
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
154 155 156
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
157
		    inet_rcv_saddr_equal(sk, sk2, true)) {
158 159 160 161 162 163 164 165 166 167 168
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
169
		}
170
	}
171 172 173
	return 0;
}

E
Eric Dumazet 已提交
174 175 176 177 178
/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
179
				struct udp_hslot *hslot2,
180
				struct sock *sk)
E
Eric Dumazet 已提交
181 182
{
	struct sock *sk2;
183
	kuid_t uid = sock_i_uid(sk);
E
Eric Dumazet 已提交
184 185 186
	int res = 0;

	spin_lock(&hslot2->lock);
187
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
188 189 190 191 192 193
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
194
		    inet_rcv_saddr_equal(sk, sk2, true)) {
195 196 197 198 199 200 201
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
E
Eric Dumazet 已提交
202 203
			break;
		}
204
	}
E
Eric Dumazet 已提交
205 206 207 208
	spin_unlock(&hslot2->lock);
	return res;
}

209
static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
210 211 212 213 214
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

215
	sk_for_each(sk2, &hslot->head) {
216 217 218 219 220 221 222
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
223
		    inet_rcv_saddr_equal(sk, sk2, false)) {
224 225
			return reuseport_add_sock(sk, sk2,
						  inet_rcv_saddr_any(sk));
226 227 228
		}
	}

229
	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
230 231
}

232
/**
233
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
234 235 236
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
L
Lucas De Marchi 已提交
237
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
E
Eric Dumazet 已提交
238
 *                   with NULL address
239
 */
240
int udp_lib_get_port(struct sock *sk, unsigned short snum,
E
Eric Dumazet 已提交
241
		     unsigned int hash2_nulladdr)
242
{
243
	struct udp_hslot *hslot, *hslot2;
244
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
245
	int    error = 1;
246
	struct net *net = sock_net(sk);
L
Linus Torvalds 已提交
247

248
	if (!snum) {
E
Eric Dumazet 已提交
249
		int low, high, remaining;
250
		unsigned int rand;
251 252
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
253

254
		inet_get_local_port_range(net, &low, &high);
255
		remaining = (high - low) + 1;
256

257
		rand = prandom_u32();
258
		first = reciprocal_scale(rand, remaining) + low;
259 260 261
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
262
		rand = (rand | 1) * (udptable->mask + 1);
E
Eric Dumazet 已提交
263 264
		last = first + udptable->mask + 1;
		do {
265
			hslot = udp_hashslot(udptable, net, first);
266
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
267
			spin_lock_bh(&hslot->lock);
268
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
269
					    udptable->log);
270 271 272 273 274 275 276

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * give us randomization and full range coverage.
			 */
E
Eric Dumazet 已提交
277
			do {
278
				if (low <= snum && snum <= high &&
279
				    !test_bit(snum >> udptable->log, bitmap) &&
280
				    !inet_is_local_reserved_port(net, snum))
281 282 283 284
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
285
			cond_resched();
E
Eric Dumazet 已提交
286
		} while (++first != last);
287
		goto fail;
288
	} else {
289
		hslot = udp_hashslot(udptable, net, snum);
290
		spin_lock_bh(&hslot->lock);
E
Eric Dumazet 已提交
291 292 293 294 295 296 297 298 299 300 301
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

302
			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
E
Eric Dumazet 已提交
303 304 305
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
306
							     sk);
E
Eric Dumazet 已提交
307 308 309 310 311 312 313
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
314
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
315 316
			goto fail_unlock;
	}
317
found:
E
Eric Dumazet 已提交
318
	inet_sk(sk)->inet_num = snum;
319 320
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
L
Linus Torvalds 已提交
321
	if (sk_unhashed(sk)) {
322
		if (sk->sk_reuseport &&
323
		    udp_reuseport_add_sock(sk, hslot)) {
324 325 326 327 328 329
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

330
		sk_add_node_rcu(sk, &hslot->head);
E
Eric Dumazet 已提交
331
		hslot->count++;
332
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
333 334 335

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
336
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
337 338 339
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
340
		else
341 342
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
343 344
		hslot2->count++;
		spin_unlock(&hslot2->lock);
L
Linus Torvalds 已提交
345
	}
346
	sock_set_flag(sk, SOCK_RCU_FREE);
347
	error = 0;
348 349
fail_unlock:
	spin_unlock_bh(&hslot->lock);
L
Linus Torvalds 已提交
350
fail:
351 352
	return error;
}
E
Eric Dumazet 已提交
353
EXPORT_SYMBOL(udp_lib_get_port);
354

355
int udp_v4_get_port(struct sock *sk, unsigned short snum)
356
{
E
Eric Dumazet 已提交
357
	unsigned int hash2_nulladdr =
358
		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
E
Eric Dumazet 已提交
359
	unsigned int hash2_partial =
360
		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
E
Eric Dumazet 已提交
361

362
	/* precompute partial secondary hash */
E
Eric Dumazet 已提交
363
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
364
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
365 366
}

367 368
static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
369 370
			 __be32 daddr, unsigned short hnum,
			 int dif, int sdif, bool exact_dif)
371
{
372 373
	int score;
	struct inet_sock *inet;
374

375 376 377 378
	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;
379

380 381 382 383 384 385 386
	score = (sk->sk_family == PF_INET) ? 2 : 1;
	inet = inet_sk(sk);

	if (inet->inet_rcv_saddr) {
		if (inet->inet_rcv_saddr != daddr)
			return -1;
		score += 4;
387
	}
388 389 390 391 392 393 394 395 396 397 398 399 400

	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

401
	if (sk->sk_bound_dev_if || exact_dif) {
402 403 404
		bool dev_match = (sk->sk_bound_dev_if == dif ||
				  sk->sk_bound_dev_if == sdif);

P
Paolo Abeni 已提交
405
		if (!dev_match)
406
			return -1;
P
Paolo Abeni 已提交
407
		if (sk->sk_bound_dev_if)
408
			score += 4;
409
	}
410

411 412
	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
413 414 415
	return score;
}

416 417 418
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
419
{
420 421 422 423
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

424
	return __inet_ehashfn(laddr, lport, faddr, fport,
425
			      udp_ehash_secret + net_hash_mix(net));
426 427
}

428
/* called with rcu_read_lock() */
E
Eric Dumazet 已提交
429
static struct sock *udp4_lib_lookup2(struct net *net,
430 431 432 433 434
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif, bool exact_dif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
E
Eric Dumazet 已提交
435 436
{
	struct sock *sk, *result;
P
Paolo Abeni 已提交
437
	int score, badness;
438
	u32 hash = 0;
E
Eric Dumazet 已提交
439 440

	result = NULL;
441
	badness = 0;
442
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
443
		score = compute_score(sk, net, saddr, sport,
444
				      daddr, hnum, dif, sdif, exact_dif);
E
Eric Dumazet 已提交
445
		if (score > badness) {
P
Paolo Abeni 已提交
446
			if (sk->sk_reuseport) {
447 448
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
449
				result = reuseport_select_sock(sk, hash, skb,
450
							sizeof(struct udphdr));
451 452
				if (result)
					return result;
453
			}
454 455
			badness = score;
			result = sk;
E
Eric Dumazet 已提交
456 457 458 459 460
		}
	}
	return result;
}

461 462 463
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
464
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
465 466
		__be16 sport, __be32 daddr, __be16 dport, int dif,
		int sdif, struct udp_table *udptable, struct sk_buff *skb)
467
{
468
	struct sock *sk, *result;
469
	unsigned short hnum = ntohs(dport);
E
Eric Dumazet 已提交
470 471
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
472
	bool exact_dif = udp_lib_exact_dif_match(net, skb);
P
Paolo Abeni 已提交
473
	int score, badness;
474
	u32 hash = 0;
475

E
Eric Dumazet 已提交
476
	if (hslot->count > 10) {
477
		hash2 = ipv4_portaddr_hash(net, daddr, hnum);
E
Eric Dumazet 已提交
478 479 480 481 482 483
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
484
					  daddr, hnum, dif, sdif,
485
					  exact_dif, hslot2, skb);
E
Eric Dumazet 已提交
486
		if (!result) {
487
			unsigned int old_slot2 = slot2;
488
			hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
E
Eric Dumazet 已提交
489
			slot2 = hash2 & udptable->mask;
490 491 492 493
			/* avoid searching the same slot again. */
			if (unlikely(slot2 == old_slot2))
				return result;

E
Eric Dumazet 已提交
494 495 496 497
			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

498
			result = udp4_lib_lookup2(net, saddr, sport,
499
						  daddr, hnum, dif, sdif,
500
						  exact_dif, hslot2, skb);
E
Eric Dumazet 已提交
501
		}
502 503
		if (unlikely(IS_ERR(result)))
			return NULL;
E
Eric Dumazet 已提交
504 505
		return result;
	}
506 507
begin:
	result = NULL;
508
	badness = 0;
509
	sk_for_each_rcu(sk, &hslot->head) {
510
		score = compute_score(sk, net, saddr, sport,
511
				      daddr, hnum, dif, sdif, exact_dif);
512
		if (score > badness) {
P
Paolo Abeni 已提交
513
			if (sk->sk_reuseport) {
514 515
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
516
				result = reuseport_select_sock(sk, hash, skb,
517
							sizeof(struct udphdr));
518 519
				if (unlikely(IS_ERR(result)))
					return NULL;
520 521
				if (result)
					return result;
522
			}
523 524
			result = sk;
			badness = score;
525 526 527 528
		}
	}
	return result;
}
529
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
530

531 532
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
533
						 struct udp_table *udptable)
534 535 536
{
	const struct iphdr *iph = ip_hdr(skb);

537
	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
538
				 iph->daddr, dport, inet_iif(skb),
539
				 inet_sdif(skb), udptable, skb);
540 541
}

542 543 544
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
545
	return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
546 547 548
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);

549 550 551
/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
552
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
553 554 555
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
556 557 558
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
559
			       dif, 0, &udp_table, NULL);
560
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
561 562
		sk = NULL;
	return sk;
563 564
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
565
#endif
566

S
Shawn Bohrer 已提交
567 568 569
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
570
				       int dif, int sdif, unsigned short hnum)
S
Shawn Bohrer 已提交
571 572 573 574 575 576 577 578 579
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
580 581
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
	     sk->sk_bound_dev_if != sdif))
S
Shawn Bohrer 已提交
582
		return false;
583
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
S
Shawn Bohrer 已提交
584 585 586 587
		return false;
	return true;
}

588 589 590 591 592 593 594 595 596 597 598
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

599
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
600 601
{
	struct inet_sock *inet;
602
	const struct iphdr *iph = (const struct iphdr *)skb->data;
E
Eric Dumazet 已提交
603
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
604 605 606 607 608
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;
609
	struct net *net = dev_net(skb->dev);
610

611
	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
612 613
			       iph->saddr, uh->source, skb->dev->ifindex, 0,
			       udptable, NULL);
614
	if (!sk) {
E
Eric Dumazet 已提交
615
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
636
			ipv4_sk_update_pmtu(skb, sk, info);
637 638 639 640 641 642 643 644 645 646 647 648 649
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
650 651
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
652
		goto out;
653 654 655 656 657 658 659 660 661
	}

	/*
	 *      RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
662
	} else
E
Eric Dumazet 已提交
663
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
664

665 666 667
	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
668
	return;
669 670 671 672
}

void udp_err(struct sk_buff *skb, u32 info)
{
673
	__udp4_lib_err(skb, info, &udp_table);
674 675 676 677 678
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
679
void udp_flush_pending_frames(struct sock *sk)
680 681 682 683 684 685 686 687 688
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
689
EXPORT_SYMBOL(udp_flush_pending_frames);
690 691

/**
H
Herbert Xu 已提交
692
 * 	udp4_hwcsum  -  handle outgoing HW checksumming
693 694
 * 	@skb: 	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
H
Herbert Xu 已提交
695 696
 *	@src:	source IP address
 *	@dst:	destination IP address
697
 */
698
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
699 700
{
	struct udphdr *uh = udp_hdr(skb);
H
Herbert Xu 已提交
701 702 703
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
704 705
	__wsum csum = 0;

706
	if (!skb_has_frag_list(skb)) {
707 708 709 710 711
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
H
Herbert Xu 已提交
712 713
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
714
	} else {
715 716
		struct sk_buff *frags;

717 718 719 720 721
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
722
		skb_walk_frags(skb, frags) {
H
Herbert Xu 已提交
723 724
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
725
		}
726

H
Herbert Xu 已提交
727
		csum = skb_checksum(skb, offset, hlen, csum);
728 729 730 731 732 733 734
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
735
EXPORT_SYMBOL_GPL(udp4_hwcsum);
736

737 738 739 740 741 742 743 744
/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

745
	if (nocheck) {
746
		uh->check = 0;
747
	} else if (skb_is_gso(skb)) {
748
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
749 750 751 752 753
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
754
	} else {
755 756 757 758 759 760 761 762
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);

763 764
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
			struct inet_cork *cork)
765
{
H
Herbert Xu 已提交
766
	struct sock *sk = skb->sk;
767 768 769 770
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
H
Herbert Xu 已提交
771 772
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
773 774 775 776 777 778
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
H
Herbert Xu 已提交
779
	uh->source = inet->inet_sport;
780
	uh->dest = fl4->fl4_dport;
H
Herbert Xu 已提交
781
	uh->len = htons(len);
782 783
	uh->check = 0;

784 785 786 787
	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

788 789
		if (hlen + cork->gso_size > cork->fragsize) {
			kfree_skb(skb);
790
			return -EINVAL;
791 792 793
		}
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
794
			return -EINVAL;
795 796 797
		}
		if (sk->sk_no_check_tx) {
			kfree_skb(skb);
798
			return -EINVAL;
799
		}
800
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
801 802
		    dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
803
			return -EIO;
804
		}
805 806 807

		skb_shinfo(skb)->gso_size = cork->gso_size;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
808 809
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
							 cork->gso_size);
810
		goto csum_partial;
811 812
	}

813
	if (is_udplite)  				 /*     UDP-Lite      */
H
Herbert Xu 已提交
814
		csum = udplite_csum(skb);
815

816
	else if (sk->sk_no_check_tx) {			 /* UDP csum off */
817 818 819 820 821

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
822
csum_partial:
823

824
		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
825 826
		goto send;

H
Herbert Xu 已提交
827 828
	} else
		csum = udp_csum(skb);
829 830

	/* add protocol-dependent pseudo-header */
831
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
E
Eric Dumazet 已提交
832
				      sk->sk_protocol, csum);
833 834 835 836
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
E
Eric Dumazet 已提交
837
	err = ip_send_skb(sock_net(sk), skb);
E
Eric Dumazet 已提交
838 839
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
840 841
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
E
Eric Dumazet 已提交
842 843 844
			err = 0;
		}
	} else
845 846
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
H
Herbert Xu 已提交
847 848 849 850 851 852
	return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
853
int udp_push_pending_frames(struct sock *sk)
H
Herbert Xu 已提交
854 855 856
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
D
David S. Miller 已提交
857
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
H
Herbert Xu 已提交
858 859 860
	struct sk_buff *skb;
	int err = 0;

861
	skb = ip_finish_skb(sk, fl4);
H
Herbert Xu 已提交
862 863 864
	if (!skb)
		goto out;

865
	err = udp_send_skb(skb, fl4, &inet->cork.base);
H
Herbert Xu 已提交
866

867 868 869 870 871
out:
	up->len = 0;
	up->pending = 0;
	return err;
}
872
EXPORT_SYMBOL(udp_push_pending_frames);
873

W
Willem de Bruijn 已提交
874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910
static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
{
	switch (cmsg->cmsg_type) {
	case UDP_SEGMENT:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
			return -EINVAL;
		*gso_size = *(__u16 *)CMSG_DATA(cmsg);
		return 0;
	default:
		return -EINVAL;
	}
}

int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
{
	struct cmsghdr *cmsg;
	bool need_ip = false;
	int err;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_UDP) {
			need_ip = true;
			continue;
		}

		err = __udp_cmsg_send(cmsg, gso_size);
		if (err)
			return err;
	}

	return need_ip;
}
EXPORT_SYMBOL_GPL(udp_cmsg_send);

911
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
912 913 914
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
A
Andrey Ignatov 已提交
915
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
916
	struct flowi4 fl4_stack;
D
David S. Miller 已提交
917
	struct flowi4 *fl4;
918 919 920 921 922 923 924 925 926 927 928
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
H
Herbert Xu 已提交
929
	struct sk_buff *skb;
930
	struct ip_options_data opt_copy;
931 932 933 934 935 936 937 938

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

E
Eric Dumazet 已提交
939
	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
940 941
		return -EOPNOTSUPP;

H
Herbert Xu 已提交
942 943
	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

944
	fl4 = &inet->cork.fl.u.ip4;
945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
A
Andrey Ignatov 已提交
965
	if (usin) {
966 967 968 969 970 971 972 973 974 975 976 977 978 979
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
E
Eric Dumazet 已提交
980 981
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
982 983 984 985 986 987
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

988
	ipcm_init_sk(&ipc, inet);
989
	ipc.gso_size = up->gso_size;
990

991
	if (msg->msg_controllen) {
W
Willem de Bruijn 已提交
992 993 994 995 996
		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
		if (err > 0)
			err = ip_cmsg_send(sk, msg, &ipc,
					   sk->sk_family == AF_INET6);
		if (unlikely(err < 0)) {
997
			kfree(ipc.opt);
998
			return err;
999
		}
1000 1001 1002 1003
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}
1016

A
Andrey Ignatov 已提交
1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032
	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
					    (struct sockaddr *)usin, &ipc.addr);
		if (err)
			goto out_free;
		if (usin) {
			if (usin->sin_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_free;
			}
			daddr = usin->sin_addr.s_addr;
			dport = usin->sin_port;
		}
	}

1033 1034 1035
	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

1036
	if (ipc.opt && ipc.opt->opt.srr) {
1037 1038 1039 1040
		if (!daddr) {
			err = -EINVAL;
			goto out_free;
		}
1041
		faddr = ipc.opt->opt.faddr;
1042 1043
		connected = 0;
	}
1044
	tos = get_rttos(&ipc, inet);
1045 1046
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
1047
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
1048 1049 1050 1051 1052 1053 1054 1055 1056 1057
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
1058
	} else if (!ipc.oif) {
1059
		ipc.oif = inet->uc_index;
1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072
	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
		/* oif is set, packet is to local broadcast and
		 * and uc_index is set. oif is most likely set
		 * by sk_bound_dev_if. If uc_index != oif check if the
		 * oif is an L3 master and uc_index is an L3 slave.
		 * If so, we want to allow the send using the uc_index.
		 */
		if (ipc.oif != inet->uc_index &&
		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
							      inet->uc_index)) {
			ipc.oif = inet->uc_index;
		}
	}
1073 1074

	if (connected)
E
Eric Dumazet 已提交
1075
		rt = (struct rtable *)sk_dst_check(sk, 0);
1076

1077
	if (!rt) {
1078
		struct net *net = sock_net(sk);
D
David Ahern 已提交
1079
		__u8 flow_flags = inet_sk_flowi_flags(sk);
1080

1081
		fl4 = &fl4_stack;
D
David Ahern 已提交
1082

1083
		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
1084
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
D
David Ahern 已提交
1085
				   flow_flags,
1086 1087
				   faddr, saddr, dport, inet->inet_sport,
				   sk->sk_uid);
1088

1089 1090
		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
1091 1092
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
1093
			rt = NULL;
1094
			if (err == -ENETUNREACH)
1095
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
1096 1097 1098 1099 1100 1101 1102 1103
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
1104
			sk_dst_set(sk, dst_clone(&rt->dst));
1105 1106 1107 1108 1109 1110
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

1111
	saddr = fl4->saddr;
1112
	if (!ipc.addr)
1113
		daddr = ipc.addr = fl4->daddr;
1114

H
Herbert Xu 已提交
1115 1116
	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
W
Willem de Bruijn 已提交
1117 1118
		struct inet_cork cork;

1119
		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
H
Herbert Xu 已提交
1120
				  sizeof(struct udphdr), &ipc, &rt,
W
Willem de Bruijn 已提交
1121
				  &cork, msg->msg_flags);
H
Herbert Xu 已提交
1122
		err = PTR_ERR(skb);
1123
		if (!IS_ERR_OR_NULL(skb))
1124
			err = udp_send_skb(skb, fl4, &cork);
H
Herbert Xu 已提交
1125 1126 1127
		goto out;
	}

1128 1129 1130 1131 1132 1133
	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

1134
		net_dbg_ratelimited("socket already corked\n");
1135 1136 1137 1138 1139 1140
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
D
David S. Miller 已提交
1141 1142 1143
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
1144 1145
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
1146 1147 1148 1149
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
1150
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
1151 1152
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1153 1154 1155 1156 1157 1158 1159 1160 1161 1162
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
1163
out_free:
1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1176 1177
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
1178 1179 1180 1181
	}
	return err;

do_confirm:
1182 1183
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4->daddr);
1184 1185 1186 1187 1188
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
E
Eric Dumazet 已提交
1189
EXPORT_SYMBOL(udp_sendmsg);
1190 1191 1192 1193

int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
1194
	struct inet_sock *inet = inet_sk(sk);
1195 1196 1197
	struct udp_sock *up = udp_sk(sk);
	int ret;

1198 1199 1200
	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

1201 1202 1203 1204 1205 1206 1207
	if (!up->pending) {
		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
1208
		ret = udp_sendmsg(sk, &msg, 0);
1209 1210 1211 1212 1213 1214 1215 1216 1217
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

1218
		net_dbg_ratelimited("cork failed\n");
1219 1220 1221
		return -EINVAL;
	}

1222 1223
	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}

1244 1245
#define UDP_SKB_IS_STATELESS 0x80000000

1246 1247
static void udp_set_dev_scratch(struct sk_buff *skb)
{
1248
	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);
1249 1250

	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
1251 1252
	scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
1253 1254 1255
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
1256
#endif
P
Paolo Abeni 已提交
1257 1258 1259 1260 1261
	/* all head states execept sp (dst, sk, nf) are always cleared by
	 * udp_rcv() and we need to preserve secpath, if present, to eventually
	 * process IP_CMSG_PASSSEC at recvmsg() time
	 */
	if (likely(!skb_sec_path(skb)))
1262
		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
1263 1264 1265 1266
}

static int udp_skb_truesize(struct sk_buff *skb)
{
1267
	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
1268 1269
}

1270
static bool udp_skb_has_head_state(struct sk_buff *skb)
1271
{
1272
	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
1273 1274
}

1275
/* fully reclaim rmem/fwd memory allocated for skb */
1276 1277
static void udp_rmem_release(struct sock *sk, int size, int partial,
			     bool rx_queue_lock_held)
1278
{
1279
	struct udp_sock *up = udp_sk(sk);
1280
	struct sk_buff_head *sk_queue;
1281 1282
	int amt;

1283 1284 1285
	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
1286
		if (size < (sk->sk_rcvbuf >> 2))
1287 1288 1289 1290 1291 1292
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

1293 1294 1295
	/* acquire the sk_receive_queue for fwd allocated memory scheduling,
	 * if the called don't held it already
	 */
1296
	sk_queue = &sk->sk_receive_queue;
1297 1298 1299
	if (!rx_queue_lock_held)
		spin_lock(&sk_queue->lock);

1300

1301 1302 1303 1304 1305 1306
	sk->sk_forward_alloc += size;
	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
	sk->sk_forward_alloc -= amt;

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
1307 1308

	atomic_sub(size, &sk->sk_rmem_alloc);
1309 1310 1311 1312

	/* this can save us from acquiring the rx queue lock on next receive */
	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);

1313 1314
	if (!rx_queue_lock_held)
		spin_unlock(&sk_queue->lock);
1315 1316
}

1317
/* Note: called with reader_queue.lock held.
1318 1319 1320 1321
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
 * This avoids a cache line miss while receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
1322
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
1323
{
1324 1325
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
1326
}
1327
EXPORT_SYMBOL(udp_skb_destructor);
1328

1329
/* as above, but the caller held the rx queue lock, too */
1330
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
1331
{
1332 1333
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
1334 1335
}

E
Eric Dumazet 已提交
1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360
/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylock can be allocated on a per cpu manner, instead of a
 * per socket one (that would consume a cache line per socket)
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}

1361 1362 1363 1364
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, delta, amt, err = -ENOMEM;
E
Eric Dumazet 已提交
1365
	spinlock_t *busy = NULL;
1366
	int size;
1367 1368 1369 1370 1371

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
1372
	if (rmem > sk->sk_rcvbuf)
1373 1374
		goto drop;

1375 1376 1377 1378 1379 1380
	/* Under mem pressure, it might be helpful to help udp_recvmsg()
	 * having linear skbs :
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
E
Eric Dumazet 已提交
1381
	if (rmem > (sk->sk_rcvbuf >> 1)) {
1382
		skb_condense(skb);
E
Eric Dumazet 已提交
1383 1384 1385

		busy = busylock_acquire(sk);
	}
1386
	size = skb->truesize;
1387
	udp_set_dev_scratch(skb);
1388

1389 1390 1391 1392
	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
1393
	if (rmem > (size + sk->sk_rcvbuf))
1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

1411 1412 1413
	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
1414 1415 1416 1417 1418 1419 1420 1421
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

E
Eric Dumazet 已提交
1422
	busylock_release(busy);
1423 1424 1425 1426 1427 1428 1429
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
E
Eric Dumazet 已提交
1430
	busylock_release(busy);
1431 1432 1433 1434
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);

1435
void udp_destruct_sock(struct sock *sk)
1436 1437
{
	/* reclaim completely the forward allocated memory */
1438
	struct udp_sock *up = udp_sk(sk);
1439 1440 1441
	unsigned int total = 0;
	struct sk_buff *skb;

1442 1443
	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
1444 1445 1446
		total += skb->truesize;
		kfree_skb(skb);
	}
1447
	udp_rmem_release(sk, total, 0, true);
1448

1449 1450
	inet_sock_destruct(sk);
}
1451
EXPORT_SYMBOL_GPL(udp_destruct_sock);
1452 1453 1454

int udp_init_sock(struct sock *sk)
{
1455
	skb_queue_head_init(&udp_sk(sk)->reader_queue);
1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}
EXPORT_SYMBOL_GPL(udp_init_sock);

void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}
P
Paolo Abeni 已提交
1469

1470 1471 1472
	if (!skb_unref(skb))
		return;

1473 1474
	/* In the more common cases we cleared the head states previously,
	 * see __udp_queue_rcv_skb().
1475
	 */
1476
	if (unlikely(udp_skb_has_head_state(skb)))
1477
		skb_release_head_state(skb);
1478
	__consume_stateless_skb(skb);
1479 1480 1481
}
EXPORT_SYMBOL_GPL(skb_consume_udp);

1482 1483 1484 1485 1486 1487
static struct sk_buff *__first_packet_length(struct sock *sk,
					     struct sk_buff_head *rcvq,
					     int *total)
{
	struct sk_buff *skb;

P
Paolo Abeni 已提交
1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504
	while ((skb = skb_peek(rcvq)) != NULL) {
		if (udp_lib_checksum_complete(skb)) {
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			atomic_inc(&sk->sk_drops);
			__skb_unlink(skb, rcvq);
			*total += skb->truesize;
			kfree_skb(skb);
		} else {
			/* the csum related bits could be changed, refresh
			 * the scratch area
			 */
			udp_set_dev_scratch(skb);
			break;
		}
1505 1506 1507 1508
	}
	return skb;
}

E
Eric Dumazet 已提交
1509 1510 1511 1512 1513
/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
1514
 *	Returns the length of found skb, or -1 if none is found.
E
Eric Dumazet 已提交
1515
 */
1516
static int first_packet_length(struct sock *sk)
E
Eric Dumazet 已提交
1517
{
1518 1519
	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
E
Eric Dumazet 已提交
1520
	struct sk_buff *skb;
1521
	int total = 0;
1522
	int res;
E
Eric Dumazet 已提交
1523 1524

	spin_lock_bh(&rcvq->lock);
1525 1526 1527 1528 1529 1530 1531
	skb = __first_packet_length(sk, rcvq, &total);
	if (!skb && !skb_queue_empty(sk_queue)) {
		spin_lock(&sk_queue->lock);
		skb_queue_splice_tail_init(sk_queue, rcvq);
		spin_unlock(&sk_queue->lock);

		skb = __first_packet_length(sk, rcvq, &total);
E
Eric Dumazet 已提交
1532
	}
1533
	res = skb ? skb->len : -1;
1534
	if (total)
1535
		udp_rmem_release(sk, total, 1, false);
E
Eric Dumazet 已提交
1536 1537 1538 1539
	spin_unlock_bh(&rcvq->lock);
	return res;
}

L
Linus Torvalds 已提交
1540 1541 1542
/*
 *	IOCTL requests applicable to the UDP protocol
 */
1543

L
Linus Torvalds 已提交
1544 1545
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
1546 1547
	switch (cmd) {
	case SIOCOUTQ:
L
Linus Torvalds 已提交
1548
	{
1549 1550
		int amount = sk_wmem_alloc_get(sk);

1551 1552
		return put_user(amount, (int __user *)arg);
	}
L
Linus Torvalds 已提交
1553

1554 1555
	case SIOCINQ:
	{
1556
		int amount = max_t(int, 0, first_packet_length(sk));
1557 1558 1559

		return put_user(amount, (int __user *)arg);
	}
L
Linus Torvalds 已提交
1560

1561 1562
	default:
		return -ENOIOCTLCMD;
L
Linus Torvalds 已提交
1563
	}
1564 1565

	return 0;
L
Linus Torvalds 已提交
1566
}
E
Eric Dumazet 已提交
1567
EXPORT_SYMBOL(udp_ioctl);
L
Linus Torvalds 已提交
1568

1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *peeked, int *off, int *err)
{
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff_head *queue;
	struct sk_buff *last;
	long timeo;
	int error;

	queue = &udp_sk(sk)->reader_queue;
	flags |= noblock ? MSG_DONTWAIT : 0;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		struct sk_buff *skb;

		error = sock_error(sk);
		if (error)
			break;

		error = -EAGAIN;
		*peeked = 0;
		do {
			spin_lock_bh(&queue->lock);
			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_destructor,
1594
							peeked, off, err,
1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605
							&last);
			if (skb) {
				spin_unlock_bh(&queue->lock);
				return skb;
			}

			if (skb_queue_empty(sk_queue)) {
				spin_unlock_bh(&queue->lock);
				goto busy_check;
			}

1606 1607 1608 1609 1610
			/* refill the reader queue and walk it again
			 * keep both queues locked to avoid re-acquiring
			 * the sk_receive_queue lock if fwd memory scheduling
			 * is needed.
			 */
1611 1612 1613 1614
			spin_lock(&sk_queue->lock);
			skb_queue_splice_tail_init(sk_queue, queue);

			skb = __skb_try_recv_from_queue(sk, queue, flags,
1615
							udp_skb_dtor_locked,
1616
							peeked, off, err,
1617
							&last);
1618
			spin_unlock(&sk_queue->lock);
1619
			spin_unlock_bh(&queue->lock);
1620
			if (skb)
1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637
				return skb;

busy_check:
			if (!sk_can_busy_loop(sk))
				break;

			sk_busy_loop(sk, flags & MSG_DONTWAIT);
		} while (!skb_queue_empty(sk_queue));

		/* sk_queue is empty, reader_queue may contain peeked packets */
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &error, &timeo,
					      (struct sk_buff *)sk_queue));

	*err = error;
	return NULL;
}
1638
EXPORT_SYMBOL(__skb_recv_udp);
1639

1640 1641 1642 1643 1644
/*
 * 	This should be easy, if there is something there we
 * 	return it, otherwise we block.
 */

1645 1646
int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
		int flags, int *addr_len)
1647 1648
{
	struct inet_sock *inet = inet_sk(sk);
1649
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
1650
	struct sk_buff *skb;
1651
	unsigned int ulen, copied;
1652
	int peeked, peeking, off;
1653 1654
	int err;
	int is_udplite = IS_UDPLITE(sk);
1655
	bool checksum_valid = false;
1656 1657

	if (flags & MSG_ERRQUEUE)
1658
		return ip_recv_error(sk, msg, len, addr_len);
1659 1660

try_again:
1661 1662
	peeking = flags & MSG_PEEK;
	off = sk_peek_offset(sk, flags);
1663
	skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
1664
	if (!skb)
1665
		return err;
1666

1667
	ulen = udp_skb_len(skb);
1668
	copied = len;
1669 1670
	if (copied > ulen - off)
		copied = ulen - off;
1671
	else if (copied < ulen)
1672 1673 1674 1675 1676 1677 1678 1679
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

1680 1681
	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
1682 1683
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
1684
		if (!checksum_valid)
1685 1686 1687
			goto csum_copy_err;
	}

1688 1689 1690 1691 1692 1693
	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
1694
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
1695 1696 1697 1698 1699

		if (err == -EINVAL)
			goto csum_copy_err;
	}

1700
	if (unlikely(err)) {
1701 1702
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
1703 1704
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
1705
		}
1706
		kfree_skb(skb);
1707
		return err;
1708
	}
1709 1710

	if (!peeked)
1711 1712
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);
1713

1714
	sock_recv_ts_and_drops(msg, sk, skb);
1715 1716

	/* Copy the address. */
E
Eric Dumazet 已提交
1717
	if (sin) {
1718 1719 1720 1721
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
1722
		*addr_len = sizeof(*sin);
D
Daniel Borkmann 已提交
1723 1724 1725 1726

		if (cgroup_bpf_enabled)
			BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
							(struct sockaddr *)sin);
1727 1728
	}
	if (inet->cmsg_flags)
1729
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);
1730

1731
	err = copied;
1732 1733 1734
	if (flags & MSG_TRUNC)
		err = ulen;

1735
	skb_consume_udp(sk, skb, peeking ? -err : err);
1736 1737 1738
	return err;

csum_copy_err:
1739 1740
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
1741 1742
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1743
	}
1744
	kfree_skb(skb);
1745

1746 1747
	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
1748
	msg->msg_flags &= ~MSG_TRUNC;
1749 1750 1751
	goto try_again;
}

A
Andrey Ignatov 已提交
1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	/* This check is replicated from __ip4_datagram_connect() and
	 * intended to prevent BPF program called below from accessing bytes
	 * that are out of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
}
EXPORT_SYMBOL(udp_pre_connect);

int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);

int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);
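
/* Illustrative sketch (not part of the kernel build): __udp_disconnect()
 * is what user space reaches when it "disconnects" a datagram socket by
 * calling connect() with an AF_UNSPEC address, e.g.:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, &sa, sizeof(sa));	// clears the peer set earlier
 *
 * After this the socket can again sendto()/recvfrom() arbitrary peers.
 */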

void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);

/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);

static void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}

static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

static DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void)
{
	static_branch_enable(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_enable);
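
/* Illustrative sketch (not part of the kernel build): the encapsulation
 * path below is normally armed by a tunnel driver or by user space
 * setting the UDP_ENCAP socket option; a hypothetical L2TP daemon might
 * do:
 *
 *	int type = UDP_ENCAP_L2TPINUDP;
 *
 *	setsockopt(fd, SOL_UDP, UDP_ENCAP, &type, sizeof(type));
 *
 * which lands in udp_lib_setsockopt() further down, sets up->encap_type
 * and calls udp_encap_enable() to flip the static key tested in
 * udp_queue_rcv_skb().
 */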

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * 	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
			goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	if (dst_hold_safe(dst)) {
		old = xchg(&sk->sk_rx_dst, dst);
		dst_release(old);
		return old != dst;
	}
	return false;
}
EXPORT_SYMBOL(udp_sk_rx_dst_set);

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr  *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, sdif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}

/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the udp header, and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;

		if (UDP_SKB_CB(skb)->partial_cov) {
			skb->csum = inet_compute_pseudo(skb, proto);
			return 0;
		}
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * force to int.
	 */
	err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							inet_compute_pseudo);
	if (err)
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
		/* If SW calculated the value, we know it's bad */
		if (skb->csum_complete_sw)
			return 1;

		/* HW says the value is bad. Let's validate that.
		 * skb->csum is no longer the full packet checksum,
		 * so don't treat it as such.
		 */
		skb_checksum_complete_unset(skb);
	}

	return 0;
}

/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
 * return code conversion for ip layer consumption
 */
static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
			       struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					 inet_compute_pseudo);

	ret = udp_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input, but
	 * it wants the return to be -protocol, or 0
	 */
	if (ret > 0)
		return -ret;
	return 0;
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh   = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_unicast_rcv_skb(sk, skb, uh);
		sock_put(sk);
		return ret;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk)
		return udp_unicast_rcv_skb(sk, skb, uh);

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got a UDP packet to a port to which we
	 * don't wanna listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source),
			    ulen, skb->len,
			    &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
			    ulen);
	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket found returns NULL
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif, int sdif)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(loc_port);
	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
	struct udp_hslot *hslot = &udp_table.hash[slot];

	/* Do not bother scanning a too big list */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, sdif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}

/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (INET_MATCH(sk, net, acookie, rmt_addr,
			       loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

int udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct in_device *in_dev = NULL;
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	int ours;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return 0;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_MULTICAST) {
		in_dev = __in_dev_get_rcu(skb->dev);

		if (!in_dev)
			return 0;

		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
				       iph->protocol);
		if (!ours)
			return 0;

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr,
						   dif, sdif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif, sdif);
	}

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return 0;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		u32 itag = 0;

		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);

		/* for unconnected multicast sockets we need to validate
		 * the source on each packet
		 */
		if (!inet_sk(sk)->inet_daddr && in_dev)
			return ip_mc_validate_source(skb, iph->daddr,
						     iph->saddr, iph->tos,
						     skb->dev, in_dev, &itag);
	}
	return 0;
}
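
/* Illustrative note: this early-demux fast path can be toggled at run
 * time through the net.ipv4.udp_early_demux sysctl, e.g.
 *
 *	sysctl -w net.ipv4.udp_early_demux=0
 *
 * which a router might prefer, since early demux optimizes for locally
 * terminated traffic at a small cost for pure forwarding.
 */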

int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);
	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
		void (*encap_destroy)(struct sock *sk);
		encap_destroy = READ_ONCE(up->encap_destroy);
		if (encap_destroy)
			encap_destroy(sk);
	}
}

/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			udp_encap_enable();
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	case UDP_NO_CHECK6_TX:
		up->no_check6_tx = valbool;
		break;

	case UDP_NO_CHECK6_RX:
		up->no_check6_rx = valbool;
		break;

	case UDP_SEGMENT:
		if (val < 0 || val > USHRT_MAX)
			return -EINVAL;
		up->gso_size = val;
		break;

	/*
	 * 	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
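
/* Illustrative sketch (not part of the kernel build): the user-space view
 * of two option groups handled above.  Hypothetical application code on
 * already-created sockets:
 *
 *	// UDP GSO: hand the stack large buffers, let it emit 1400-byte
 *	// datagrams (the SOL_UDP / UDP_SEGMENT case above).
 *	int gso = 1400;
 *	setsockopt(udp_fd, SOL_UDP, UDP_SEGMENT, &gso, sizeof(gso));
 *
 *	// UDP-Lite partial coverage: checksum only the first 20 bytes on
 *	// send and require at least 20 covered bytes on receive
 *	// (the UDPLITE_SEND_CSCOV / UDPLITE_RECV_CSCOV cases above).
 *	int cov = 20;
 *	setsockopt(lite_fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *	setsockopt(lite_fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 */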

int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	case UDP_NO_CHECK6_TX:
		val = up->no_check6_tx;
		break;

	case UDP_NO_CHECK6_RX:
		val = up->no_check6_rx;
		break;

	case UDP_SEGMENT:
		val = up->gso_size;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);

int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
 * 	udp_poll - wait for a UDP event.
 *	@file - file struct
 *	@sock - socket
 *	@wait - poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets. If the application is using a blocking fd
 *	and a packet with a checksum error is in the queue,
 *	then it could get a return from select indicating data available
 *	but then block when reading it. Add special case code
 *	to work around these arguably broken applications.
 */
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Check for false positives due to checksum errors */
	if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(EPOLLIN | EPOLLRDNORM);

	return mask;
}
EXPORT_SYMBOL(udp_poll);

int udp_abort(struct sock *sk, int err)
{
	lock_sock(sk);

	sk->sk_err = err;
	sk->sk_error_report(sk);
	__udp_disconnect(sk, 0);

	release_sock(sk);

	return 0;
}
EXPORT_SYMBOL_GPL(udp_abort);
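
/* Illustrative note: udp_abort() is wired up as .diag_destroy below, so it
 * runs when an administrator forcibly closes a UDP socket through the
 * inet_diag SOCK_DESTROY command (CONFIG_INET_DIAG_DESTROY), e.g. with
 * something like "ss -K dport = 5060" from iproute2.  Rough control flow:
 * netlink request -> inet_diag -> sk->sk_prot->diag_destroy -> udp_abort(),
 * which reports the error and breaks the association.
 */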

struct proto udp_prot = {
	.name			= "UDP",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udp_pre_connect,
	.connect		= ip4_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udp_destroy_sock,
	.setsockopt		= udp_setsockopt,
	.getsockopt		= udp_getsockopt,
	.sendmsg		= udp_sendmsg,
	.recvmsg		= udp_recvmsg,
	.sendpage		= udp_sendpage,
	.release_cb		= ip4_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v4_rehash,
	.get_port		= udp_v4_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp_sock),
	.h.udp_table		= &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_udp_setsockopt,
	.compat_getsockopt	= compat_udp_getsockopt,
#endif
	.diag_destroy		= udp_abort,
};
EXPORT_SYMBOL(udp_prot);

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket <= afinfo->udp_table->mask;
	     ++state->bucket) {
		struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket];

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == afinfo->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != afinfo->family));

	if (!sk) {
		if (state->bucket <= afinfo->udp_table->mask)
			spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;
	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(udp_seq_start);

void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}
EXPORT_SYMBOL(udp_seq_next);

void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;

	if (state->bucket <= afinfo->udp_table->mask)
		spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
}
EXPORT_SYMBOL(udp_seq_stop);

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
		int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src  = inet->inet_rcv_saddr;
	__u16 destp	  = ntohs(inet->inet_dport);
	__u16 srcp	  = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		udp_rqueue_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		refcount_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}
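
/* Illustrative sketch: the seq_printf() format above produces lines of
 * /proc/net/udp such as (field values hypothetical):
 *
 *    sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode ref pointer drops
 *   128: 00000000:0044 00000000:0000 07 00000000:00000000 00:00000000 00000000     0        0 16384 2 0000000000000000 0
 *
 * Addresses and ports are hex, "st" is the socket state (07 == TCP_CLOSE
 * for an unconnected UDP socket) and the trailing field is the per-socket
 * drop counter printed by udp4_format_sock().
 */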

const struct seq_operations udp_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp4_seq_show,
};
EXPORT_SYMBOL(udp_seq_ops);

static struct udp_seq_afinfo udp4_seq_afinfo = {
	.family		= AF_INET,
	.udp_table	= &udp_table,
};

static int __net_init udp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops,
			sizeof(struct udp_iter_state), &udp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("udp", net->proc_net);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);
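
/* Illustrative note: uhash_entries is a boot-time parameter; booting with
 * "uhash_entries=65536" on the kernel command line asks udp_table_init()
 * below to size the primary hash with that many slots (values below
 * UDP_HTABLE_SIZE_MIN are rounded up).  When the parameter is absent,
 * alloc_large_system_hash() scales the table from available memory.
 */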

void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].head);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}

u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);

static void __udp_sysctl_init(struct net *net)
{
	net->ipv4.sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	net->ipv4.sysctl_udp_wmem_min = SK_MEM_QUANTUM;

#ifdef CONFIG_NET_L3_MASTER_DEV
	net->ipv4.sysctl_udp_l3mdev_accept = 0;
#endif
}

static int __net_init udp_sysctl_init(struct net *net)
{
	__udp_sysctl_init(net);
	return 0;
}

static struct pernet_operations __net_initdata udp_sysctl_ops = {
	.init	= udp_sysctl_init,
};

void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	__udp_sysctl_init(&init_net);

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);

	if (register_pernet_subsys(&udp_sysctl_ops))
		panic("UDP: failed to init sysctl parameters.\n");
}