/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	: 	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	: 	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	: 	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *	Arnt Gulbrandsen 	:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *	Willy Konynenberg	:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *	Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *	Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo :	convert /proc/net/udp to seq_file
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *	James Chapman		:	Add L2TP encapsulation type.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "UDP: " fmt

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)

/* IPCB reference means this can not be used from early demux */
static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_udp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return true;
#endif
	return false;
}

static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}

/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}

static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2);
		}
	}

	return reuseport_alloc(sk);
}

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int    error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * gives us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}
EXPORT_SYMBOL(udp_lib_get_port);
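
/* Worked example of the randomized scan above (illustrative note, not from
 * the original source): with a 256-slot primary table (udptable->mask == 255),
 * the ports that hash into one slot form an arithmetic progression of stride
 * 256, i.e. a chain of 65536 / 256 = 256 candidates.  The step computed by
 * "rand = (rand | 1) * (udptable->mask + 1)" is an odd multiple of 256, so
 * "snum += rand" always stays inside the same slot, and because the odd
 * factor is coprime with the power-of-two chain length, the walk visits
 * every candidate port of the slot exactly once before wrapping back to
 * "first".
 */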

int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum,
			 int dif, int sdif, bool exact_dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;
	inet = inet_sk(sk);

	if (inet->inet_rcv_saddr) {
		if (inet->inet_rcv_saddr != daddr)
			return -1;
		score += 4;
	}

	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	if (sk->sk_bound_dev_if || exact_dif) {
		bool dev_match = (sk->sk_bound_dev_if == dif ||
				  sk->sk_bound_dev_if == sdif);

		if (!dev_match)
			return -1;
		if (sk->sk_bound_dev_if)
			score += 4;
	}

	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
	return score;
}

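/* Scoring example (illustrative note, not from the original source): for an
 * incoming IPv4 packet, a fully connect()ed AF_INET socket matching the
 * packet's addresses and ports scores 2 + 4 + 4 + 4 = 14 (plus 4 more if it
 * is also bound to the receiving device, plus 1 for a matching incoming
 * CPU), while a socket merely bound to INADDR_ANY on the right port scores
 * only 2, so the most specific socket always wins the lookups below.
 */
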
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif, bool exact_dif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	u32 hash = 0;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif, exact_dif);
		if (score > badness) {
			if (sk->sk_reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
			}
			badness = score;
			result = sk;
		}
	}
	return result;
}

/* UDP nearly always has wildcards out the wazoo; it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport, int dif,
		int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	bool exact_dif = udp_lib_exact_dif_match(net, skb);
	int score, badness;
	u32 hash = 0;

	if (hslot->count > 10) {
		hash2 = ipv4_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif, sdif,
					  exact_dif, hslot2, skb);
		if (!result) {
			unsigned int old_slot2 = slot2;
			hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
			slot2 = hash2 & udptable->mask;
			/* avoid searching the same slot again. */
			if (unlikely(slot2 == old_slot2))
				return result;

			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp4_lib_lookup2(net, saddr, sport,
						  daddr, hnum, dif, sdif,
						  exact_dif, hslot2, skb);
		}
		return result;
	}
begin:
	result = NULL;
	badness = 0;
	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif, exact_dif);
		if (score > badness) {
			if (sk->sk_reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
			}
			result = sk;
			badness = score;
		}
	}
	return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);

static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), udptable, skb);
}

struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV4) || IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif

static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, int sdif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
	     sk->sk_bound_dev_if != sdif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
		return false;
	return true;
}

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex, 0,
			       udptable, NULL);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 *      RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return;
}

void udp_err(struct sk_buff *skb, u32 info)
{
	__udp4_lib_err(skb, info, &udp_table);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 * 	udp4_hwcsum  -  handle outgoing HW checksumming
 * 	@skb: 	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW checksumming won't work, as there are two or more
		 * fragments on the socket: the csums of all sk_buffs
		 * must be combined in software.
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
EXPORT_SYMBOL_GPL(udp4_hwcsum);

/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);

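/* Usage note (illustrative, not from the original source): the typical
 * caller of udp_set_csum() is a tunnel transmit path, e.g.
 * udp_tunnel_xmit_skb() in net/ipv4/udp_tunnel.c, which passes its own
 * 'nocheck' configuration flag straight through and a @len covering the
 * UDP header plus the encapsulated payload.
 */
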
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
			struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize)
			return -EINVAL;
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
			return -EINVAL;
		if (sk->sk_no_check_tx)
			return -EINVAL;
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb)))
			return -EIO;

		skb_shinfo(skb)->gso_size = cork->gso_size;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
							 cork->gso_size);
		goto csum_partial;
	}

	if (is_udplite)  				 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {			 /* UDP csum off */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4, &inet->cork.base);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
EXPORT_SYMBOL(udp_push_pending_frames);

static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
{
	switch (cmsg->cmsg_type) {
	case UDP_SEGMENT:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
			return -EINVAL;
		*gso_size = *(__u16 *)CMSG_DATA(cmsg);
		return 0;
	default:
		return -EINVAL;
	}
}

int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
{
	struct cmsghdr *cmsg;
	bool need_ip = false;
	int err;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_UDP) {
			need_ip = true;
			continue;
		}

		err = __udp_cmsg_send(cmsg, gso_size);
		if (err)
			return err;
	}

	return need_ip;
}
EXPORT_SYMBOL_GPL(udp_cmsg_send);

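/* Usage sketch (userspace, illustrative only): udp_cmsg_send() above parses
 * a UDP_SEGMENT control message, which lets one sendmsg() carry a large
 * buffer that the stack segments into gso_size-byte wire datagrams:
 *
 *	char buf[16000];
 *	char ctl[CMSG_SPACE(sizeof(__u16))] = { 0 };
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = ctl, .msg_controllen = sizeof(ctl),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *
 *	cm->cmsg_level = SOL_UDP;
 *	cm->cmsg_type = UDP_SEGMENT;
 *	cm->cmsg_len = CMSG_LEN(sizeof(__u16));
 *	*(__u16 *)CMSG_DATA(cm) = 1400;	 (1400-byte wire datagrams)
 *	sendmsg(fd, &mh, 0);
 *
 * The same value can be set persistently with setsockopt(SOL_UDP,
 * UDP_SEGMENT, ...), which populates up->gso_size consumed by udp_sendmsg().
 */
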
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (usin) {
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

	ipc.sockc.tsflags = sk->sk_tsflags;
	ipc.addr = inet->inet_saddr;
	ipc.oif = sk->sk_bound_dev_if;
	ipc.gso_size = up->gso_size;

	if (msg->msg_controllen) {
		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
		if (err > 0)
			err = ip_cmsg_send(sk, msg, &ipc,
					   sk->sk_family == AF_INET6);
		if (unlikely(err < 0)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
					    (struct sockaddr *)usin, &ipc.addr);
		if (err)
			goto out_free;
		if (usin) {
			if (usin->sin_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_free;
			}
			daddr = usin->sin_addr.s_addr;
			dport = usin->sin_port;
		}
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr) {
			err = -EINVAL;
			goto out_free;
		}
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif) {
		ipc.oif = inet->uc_index;
	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
		/* oif is set, packet is to local broadcast
		 * and uc_index is set. oif is most likely set
		 * by sk_bound_dev_if. If uc_index != oif check if the
		 * oif is an L3 master and uc_index is an L3 slave.
		 * If so, we want to allow the send using the uc_index.
		 */
		if (ipc.oif != inet->uc_index &&
		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
							      inet->uc_index)) {
			ipc.oif = inet->uc_index;
		}
	}

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
				   flow_flags,
				   faddr, saddr, dport, inet->inet_sport,
				   sk->sk_uid);

		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		struct inet_cork cork;

		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  &cork, msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4, &cork);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("socket already corked\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
out_free:
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
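
/* Usage sketch (userspace, illustrative only): the pending/corking path in
 * udp_sendmsg() is entered either per call with MSG_MORE or persistently
 * via the UDP_CORK socket option:
 *
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, part1, len1, 0);
 *	send(fd, part2, len2, 0);
 *	on = 0;
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *
 * Clearing UDP_CORK pushes the accumulated parts out as one datagram via
 * udp_push_pending_frames().
 */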

int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (!up->pending) {
		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		net_dbg_ratelimited("cork failed\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}

#define UDP_SKB_IS_STATELESS 0x80000000

static void udp_set_dev_scratch(struct sk_buff *skb)
{
	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);

	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
	scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
#endif
	/* all head states except sp (dst, sk, nf) are always cleared by
	 * udp_rcv() and we need to preserve secpath, if present, to eventually
	 * process IP_CMSG_PASSSEC at recvmsg() time
	 */
	if (likely(!skb_sec_path(skb)))
		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
}

static int udp_skb_truesize(struct sk_buff *skb)
{
	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
}

static bool udp_skb_has_head_state(struct sk_buff *skb)
{
	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
}

/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial,
			     bool rx_queue_lock_held)
{
	struct udp_sock *up = udp_sk(sk);
	struct sk_buff_head *sk_queue;
	int amt;

	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
		if (size < (sk->sk_rcvbuf >> 2))
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

	/* acquire the sk_receive_queue for fwd allocated memory scheduling,
	 * if the caller doesn't hold it already
	 */
	sk_queue = &sk->sk_receive_queue;
	if (!rx_queue_lock_held)
		spin_lock(&sk_queue->lock);

	sk->sk_forward_alloc += size;
	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
	sk->sk_forward_alloc -= amt;

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);

	atomic_sub(size, &sk->sk_rmem_alloc);

	/* this can save us from acquiring the rx queue lock on next receive */
	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);

	if (!rx_queue_lock_held)
		spin_unlock(&sk_queue->lock);
}

/* Note: called with reader_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch.
 * This avoids a cache line miss while the receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
}
EXPORT_SYMBOL(udp_skb_destructor);

/* as above, but the caller held the rx queue lock, too */
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
{
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
}

/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylocks can be allocated on a per-cpu basis, instead of a
 * per-socket one (which would consume a cache line per socket).
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}

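/* Sizing note (illustrative, not from the original source): the udp_busylocks
 * array is allocated at boot time in udp_init(), later in this file, sized
 * from the CPU count (on the order of 16 spinlocks per CPU).  Because
 * busylock_acquire() hashes the socket pointer to pick a lock, two flooded
 * sockets usually contend on different busylocks rather than on each
 * other's receive-queue producers.
 */
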
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, delta, amt, err = -ENOMEM;
	spinlock_t *busy = NULL;
	int size;

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	if (rmem > sk->sk_rcvbuf)
		goto drop;

	/* Under mem pressure, it might be helpful to give udp_recvmsg()
	 * linear skbs:
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (sk->sk_rcvbuf >> 1)) {
		skb_condense(skb);

		busy = busylock_acquire(sk);
	}
	size = skb->truesize;
	udp_set_dev_scratch(skb);

	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
	if (rmem > (size + sk->sk_rcvbuf))
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);

void udp_destruct_sock(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	struct udp_sock *up = udp_sk(sk);
	unsigned int total = 0;
	struct sk_buff *skb;

	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
		total += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0, true);

	inet_sock_destruct(sk);
}
EXPORT_SYMBOL_GPL(udp_destruct_sock);

int udp_init_sock(struct sock *sk)
{
	skb_queue_head_init(&udp_sk(sk)->reader_queue);
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}
EXPORT_SYMBOL_GPL(udp_init_sock);

void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}

	if (!skb_unref(skb))
		return;

	/* In the more common cases we cleared the head states previously,
	 * see __udp_queue_rcv_skb().
	 */
	if (unlikely(udp_skb_has_head_state(skb)))
		skb_release_head_state(skb);
	__consume_stateless_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_consume_udp);

static struct sk_buff *__first_packet_length(struct sock *sk,
					     struct sk_buff_head *rcvq,
					     int *total)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(rcvq)) != NULL) {
		if (udp_lib_checksum_complete(skb)) {
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			atomic_inc(&sk->sk_drops);
			__skb_unlink(skb, rcvq);
			*total += skb->truesize;
			kfree_skb(skb);
		} else {
			/* the csum related bits could be changed, refresh
			 * the scratch area
			 */
			udp_set_dev_scratch(skb);
			break;
		}
	}
	return skb;
}

/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad-checksum frames until a valid one is found.
 *	Returns the length of the found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	int total = 0;
	int res;

	spin_lock_bh(&rcvq->lock);
	skb = __first_packet_length(sk, rcvq, &total);
	if (!skb && !skb_queue_empty(sk_queue)) {
		spin_lock(&sk_queue->lock);
		skb_queue_splice_tail_init(sk_queue, rcvq);
		spin_unlock(&sk_queue->lock);

		skb = __first_packet_length(sk, rcvq, &total);
	}
	res = skb ? skb->len : -1;
	if (total)
		udp_rmem_release(sk, total, 1, false);
	spin_unlock_bh(&rcvq->lock);
	return res;
}

/*
 *	IOCTL requests applicable to the UDP protocol
 */

int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		int amount = max_t(int, 0, first_packet_length(sk));

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
EXPORT_SYMBOL(udp_ioctl);

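/* Usage sketch (userspace, illustrative only): for UDP, SIOCINQ (a.k.a.
 * FIONREAD) reports the payload length of the next pending datagram via
 * first_packet_length(), and SIOCOUTQ the amount of unsent queued data:
 *
 *	int pending = 0;
 *
 *	if (ioctl(fd, SIOCINQ, &pending) == 0 && pending > 0)
 *		... allocate a pending-byte buffer before recvfrom() ...
 */
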
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *peeked, int *off, int *err)
{
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff_head *queue;
	struct sk_buff *last;
	long timeo;
	int error;

	queue = &udp_sk(sk)->reader_queue;
	flags |= noblock ? MSG_DONTWAIT : 0;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		struct sk_buff *skb;

		error = sock_error(sk);
		if (error)
			break;

		error = -EAGAIN;
		*peeked = 0;
		do {
			spin_lock_bh(&queue->lock);
			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_destructor,
							peeked, off, err,
							&last);
			if (skb) {
				spin_unlock_bh(&queue->lock);
				return skb;
			}

			if (skb_queue_empty(sk_queue)) {
				spin_unlock_bh(&queue->lock);
				goto busy_check;
			}

			/* refill the reader queue and walk it again
			 * keep both queues locked to avoid re-acquiring
			 * the sk_receive_queue lock if fwd memory scheduling
			 * is needed.
			 */
			spin_lock(&sk_queue->lock);
			skb_queue_splice_tail_init(sk_queue, queue);

			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_dtor_locked,
							peeked, off, err,
							&last);
			spin_unlock(&sk_queue->lock);
			spin_unlock_bh(&queue->lock);
			if (skb)
				return skb;

busy_check:
			if (!sk_can_busy_loop(sk))
				break;

			sk_busy_loop(sk, flags & MSG_DONTWAIT);
		} while (!skb_queue_empty(sk_queue));

		/* sk_queue is empty, reader_queue may contain peeked packets */
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &error, &timeo,
					      (struct sk_buff *)sk_queue));

	*err = error;
	return NULL;
}
EXPORT_SYMBOL_GPL(__skb_recv_udp);

/*
 * 	This should be easy: if there is something there, we
 * 	return it; otherwise we block.
 */

int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
		int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked, peeking, off;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	peeking = flags & MSG_PEEK;
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
	if (!skb)
		return err;

	ulen = udp_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
		return err;
	}

	if (!peeked)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

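/* Usage sketch (userspace, illustrative only): because the MSG_TRUNC path
 * above returns the full datagram length even when the buffer was smaller,
 * a peek can size a receive buffer exactly:
 *
 *	char probe;
 *	ssize_t full = recv(fd, &probe, 1, MSG_PEEK | MSG_TRUNC);
 *
 * "full" is the complete payload length; a second recv() with a buffer of
 * that size then consumes the datagram.
 */
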
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	/* This check is replicated from __ip4_datagram_connect() and
	 * intended to prevent BPF program called below from accessing bytes
	 * that are out of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
}
EXPORT_SYMBOL(udp_pre_connect);

int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);

int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
E
Eric Dumazet 已提交
1788
EXPORT_SYMBOL(udp_disconnect);
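
/*
 * Editorial illustration (not part of the kernel build, hence #if 0):
 * user space reaches __udp_disconnect() by calling connect() with an
 * AF_UNSPEC address, which breaks the association per the 1003.1g
 * comment above and returns the socket to an unconnected state.
 */
#if 0
#include <string.h>
#include <sys/socket.h>

static int udp_dissolve(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;	/* break the association */
	return connect(fd, &sa, sizeof(sa));
}
#endif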

void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);

/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);
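
/*
 * Editorial note: udp_lib_rehash() runs via the protocol's ->rehash()
 * hook (udp_v4_rehash() below), typically when connect() or a similar
 * operation changes a bound socket's local address: the port-only
 * (primary) hash stays put, but the port+address (secondary) hash must
 * move to its new chain under both locks.
 */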

static void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}

static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

static DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void)
{
	static_branch_enable(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_enable);

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * 	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}
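
/*
 * Editorial illustration (not part of the kernel build, hence #if 0):
 * a hypothetical encap_rcv hook following the contract documented in
 * udp_queue_rcv_skb() above -- return 0 once the skb has been consumed
 * or discarded, a positive value to hand the packet back to plain UDP,
 * or -N to resubmit it as IP protocol N.
 */
#if 0
static int example_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* Too short to be our (made-up) encapsulation header:
	 * let regular UDP deliver it.
	 */
	if (skb->len < sizeof(struct udphdr) + 4)
		return 1;

	/* Otherwise consume the packet ourselves. */
	kfree_skb(skb);
	return 0;
}
#endif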

/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	if (dst_hold_safe(dst)) {
		old = xchg(&sk->sk_rx_dst, dst);
		dst_release(old);
		return old != dst;
	}
	return false;
}
EXPORT_SYMBOL(udp_sk_rx_dst_set);

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr  *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, sdif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}
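
/*
 * Editorial illustration (not part of the kernel build, hence #if 0):
 * the cloning loop above gives every matching listener its own copy of
 * a multicast datagram.  A user-space sketch: two processes running
 * this listener, sharing the same group/port via SO_REUSEADDR, each
 * receive every packet once.
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int mcast_listener(void)
{
	struct sockaddr_in addr;
	struct ip_mreq mreq;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int one = 1;

	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(5000);		/* illustrative port */
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	memset(&mreq, 0, sizeof(mreq));
	mreq.imr_multiaddr.s_addr = inet_addr("239.1.2.3");
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
	return fd;	/* each such fd gets its own copy of every datagram */
}
#endif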

/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the udp header, and folding it into skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;

		if (UDP_SKB_CB(skb)->partial_cov) {
			skb->csum = inet_compute_pseudo(skb, proto);
			return 0;
		}
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * force to int.
	 */
	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							 inet_compute_pseudo);
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh   = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);
		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		int ret;

		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
						 inet_compute_pseudo);

		ret = udp_queue_rcv_skb(sk, skb);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * We received a UDP packet for a port we are not
	 * listening on.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source),
			    ulen, skb->len,
			    &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
			    ulen);
	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif, int sdif)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(loc_port);
	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
	struct udp_hslot *hslot = &udp_table.hash[slot];

	/* Do not bother scanning a too big list */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, sdif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}

/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (INET_MATCH(sk, net, acookie, rmt_addr,
			       loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

int udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct in_device *in_dev = NULL;
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	int ours;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return 0;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_MULTICAST) {
		in_dev = __in_dev_get_rcu(skb->dev);

		if (!in_dev)
			return 0;

		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
				       iph->protocol);
		if (!ours)
			return 0;

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr,
						   dif, sdif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif, sdif);
	}

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return 0;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		u32 itag = 0;

		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);

		/* for unconnected multicast sockets we need to validate
		 * the source on each packet
		 */
		if (!inet_sk(sk)->inet_daddr && in_dev)
			return ip_mc_validate_source(skb, iph->daddr,
						     iph->saddr, iph->tos,
						     skb->dev, in_dev, &itag);
	}
	return 0;
}
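
/*
 * Editorial note: early demux looks the destination socket up at the
 * earliest receive stage and caches its dst on the skb.  It is
 * typically gated by the net.ipv4.ip_early_demux (and, where present,
 * net.ipv4.udp_early_demux) sysctls, and the unicast path above
 * deliberately matches only connected sockets so that forwarding
 * setups keep working.
 */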

int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);
	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
		void (*encap_destroy)(struct sock *sk);

		encap_destroy = READ_ONCE(up->encap_destroy);
		if (encap_destroy)
			encap_destroy(sk);
	}
}

/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			udp_encap_enable();
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	case UDP_NO_CHECK6_TX:
		up->no_check6_tx = valbool;
		break;

	case UDP_NO_CHECK6_RX:
		up->no_check6_rx = valbool;
		break;

	case UDP_SEGMENT:
		if (val < 0 || val > USHRT_MAX)
			return -EINVAL;
		up->gso_size = val;
		break;

	/*
	 * 	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
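
/*
 * Editorial illustration (not part of the kernel build, hence #if 0):
 * a user-space sketch of the UDP_SEGMENT option handled above.  With a
 * non-zero gso_size, one large send() payload is segmented by the
 * stack into multiple wire datagrams of at most that many payload
 * bytes each.
 */
#if 0
#include <sys/socket.h>
#include <linux/udp.h>	/* UDP_SEGMENT */

#ifndef SOL_UDP
#define SOL_UDP 17
#endif

static int enable_udp_gso(int fd, unsigned short gso_size)
{
	/* Each datagram carries at most gso_size bytes of payload; a
	 * subsequent 64 kB send() is cut into ceil(64k / gso_size)
	 * datagrams by the kernel.
	 */
	return setsockopt(fd, SOL_UDP, UDP_SEGMENT,
			  &gso_size, sizeof(gso_size));
}
#endif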

int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	case UDP_NO_CHECK6_TX:
		val = up->no_check6_tx;
		break;

	case UDP_NO_CHECK6_RX:
		val = up->no_check6_rx;
		break;

	case UDP_SEGMENT:
		val = up->gso_size;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);

int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif

/**
 *	udp_poll_mask - wait for a UDP event.
 *	@sock: socket
 *	@events: events to wait for
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets.  If an application is using a blocking fd
 *	and a packet with a checksum error is in the queue,
 *	select() can indicate that data is available but a subsequent
 *	read then blocks.  Add special case code to work around these
 *	arguably broken applications.
 */
__poll_t udp_poll_mask(struct socket *sock, __poll_t events)
{
	__poll_t mask = datagram_poll_mask(sock, events);
	struct sock *sk = sock->sk;

	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Check for false positives due to checksum errors */
	if ((mask & EPOLLRDNORM) && !(sock->file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(EPOLLIN | EPOLLRDNORM);

	return mask;
}
EXPORT_SYMBOL(udp_poll_mask);
2621 2622 2623 2624 2625 2626
int udp_abort(struct sock *sk, int err)
{
	lock_sock(sk);

	sk->sk_err = err;
	sk->sk_error_report(sk);
2627
	__udp_disconnect(sk, 0);
2628 2629 2630 2631 2632 2633 2634

	release_sock(sk);

	return 0;
}
EXPORT_SYMBOL_GPL(udp_abort);


struct proto udp_prot = {
	.name			= "UDP",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udp_pre_connect,
	.connect		= ip4_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udp_destroy_sock,
	.setsockopt		= udp_setsockopt,
	.getsockopt		= udp_getsockopt,
	.sendmsg		= udp_sendmsg,
	.recvmsg		= udp_recvmsg,
	.sendpage		= udp_sendpage,
	.release_cb		= ip4_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v4_rehash,
	.get_port		= udp_v4_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp_sock),
	.h.udp_table		= &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_udp_setsockopt,
	.compat_getsockopt	= compat_udp_getsockopt,
#endif
	.diag_destroy		= udp_abort,
};
EXPORT_SYMBOL(udp_prot);

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket <= afinfo->udp_table->mask;
	     ++state->bucket) {
		struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket];

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == afinfo->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != afinfo->family));

	if (!sk) {
		if (state->bucket <= afinfo->udp_table->mask)
			spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;
	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(udp_seq_start);

void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}
EXPORT_SYMBOL(udp_seq_next);

void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct udp_iter_state *state = seq->private;

	if (state->bucket <= afinfo->udp_table->mask)
		spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
}
EXPORT_SYMBOL(udp_seq_stop);

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
		int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src  = inet->inet_rcv_saddr;
	__u16 destp	  = ntohs(inet->inet_dport);
	__u16 srcp	  = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		sk_rmem_alloc_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		refcount_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}
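
/*
 * Editorial note: each /proc/net/udp line emitted above is padded to
 * 127 columns and follows the header printed for SEQ_START_TOKEN,
 * e.g. (illustrative values, not captured output):
 *
 *    53: 00000000:0035 00000000:0000 07 00000000:00000000 00:00000000 00000000   101        0 23456 2 0000000000000000 0
 */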

const struct seq_operations udp_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp4_seq_show,
};
EXPORT_SYMBOL(udp_seq_ops);

static struct udp_seq_afinfo udp4_seq_afinfo = {
	.family		= AF_INET,
	.udp_table	= &udp_table,
};

static int __net_init udp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("udp", 0444, net->proc_net, &udp_seq_ops,
			sizeof(struct udp_iter_state), &udp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("udp", net->proc_net);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
2841 2842
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
2843
{
2844 2845
	ssize_t ret;

2846 2847
	if (!str)
		return 0;
2848 2849 2850 2851 2852

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

2853 2854 2855 2856 2857
	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);
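
/*
 * Editorial note: the table size can be pinned from the kernel command
 * line, e.g. "uhash_entries=65536".  Values below UDP_HTABLE_SIZE_MIN
 * are rounded up by the handler above; a missing or unparsable value
 * leaves the automatic sizing in alloc_large_system_hash() in effect.
 */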

void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].head);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}

u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);

static void __udp_sysctl_init(struct net *net)
{
	net->ipv4.sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	net->ipv4.sysctl_udp_wmem_min = SK_MEM_QUANTUM;

#ifdef CONFIG_NET_L3_MASTER_DEV
	net->ipv4.sysctl_udp_l3mdev_accept = 0;
#endif
}

static int __net_init udp_sysctl_init(struct net *net)
{
	__udp_sysctl_init(net);
	return 0;
}

static struct pernet_operations __net_initdata udp_sysctl_ops = {
	.init	= udp_sysctl_init,
};

void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	__udp_sysctl_init(&init_net);

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);

	if (register_pernet_subsys(&udp_sysctl_ops))
		panic("UDP: failed to init sysctl parameters.\n");
}
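
/*
 * Editorial note: a worked example of the sizing above, assuming
 * nr_free_buffer_pages() returns 1M 4 kB pages (~4 GB): limit becomes
 * 131072 pages, giving sysctl_udp_mem = { 98304, 131072, 196608 },
 * i.e. { min, pressure, max } of roughly 384 MB, 512 MB and 768 MB of
 * page memory for all UDP sockets combined.
 */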