udp.c 75.1 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
8
 * Authors:	Ross Biro
L
Linus Torvalds 已提交
9 10
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
11
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
L
Linus Torvalds 已提交
12 13 14 15 16 17 18 19 20
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	: 	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	: 	Correct error for oversized datagrams
21 22
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
L
Linus Torvalds 已提交
23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	: 	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *	Arnt Gulbrandsen 	:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *	Willy Konynenberg	:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
57
 *					for connect.
L
Linus Torvalds 已提交
58 59 60 61 62 63 64 65 66 67 68 69 70
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *	Hirokazu Takahashi	:	HW checksumming for outgoing UDP
 *					datagrams.
 *	Hirokazu Takahashi	:	sendfile() on UDP works now.
 *		Arnaldo C. Melo :	convert /proc/net/udp to seq_file
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Derek Atkins <derek@ihtfp.com>: Add Encapulation Support
71
 *	James Chapman		:	Add L2TP encapsulation type.
L
Linus Torvalds 已提交
72 73 74 75 76 77 78
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
79

80 81
#define pr_fmt(fmt) "UDP: " fmt

82
#include <linux/uaccess.h>
L
Linus Torvalds 已提交
83
#include <asm/ioctls.h>
H
Hideo Aoki 已提交
84
#include <linux/bootmem.h>
85 86
#include <linux/highmem.h>
#include <linux/swap.h>
L
Linus Torvalds 已提交
87 88 89 90 91
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
92
#include <linux/igmp.h>
93
#include <linux/inetdevice.h>
L
Linus Torvalds 已提交
94 95 96 97 98 99
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
100
#include <linux/slab.h>
101
#include <net/tcp_states.h>
L
Linus Torvalds 已提交
102 103 104
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
105
#include <net/net_namespace.h>
L
Linus Torvalds 已提交
106
#include <net/icmp.h>
S
Shawn Bohrer 已提交
107
#include <net/inet_hashtables.h>
L
Linus Torvalds 已提交
108 109 110
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
111
#include <trace/events/udp.h>
112
#include <linux/static_key.h>
113
#include <trace/events/skb.h>
114
#include <net/busy_poll.h>
115
#include "udp_impl.h"
116
#include <net/sock_reuseport.h>
E
Eric Dumazet 已提交
117
#include <net/addrconf.h>
L
Linus Torvalds 已提交
118

119
struct udp_table udp_table __read_mostly;
120
EXPORT_SYMBOL(udp_table);
L
Linus Torvalds 已提交
121

E
Eric Dumazet 已提交
122
long sysctl_udp_mem[3] __read_mostly;
H
Hideo Aoki 已提交
123
EXPORT_SYMBOL(sysctl_udp_mem);
E
Eric Dumazet 已提交
124

E
Eric Dumazet 已提交
125
atomic_long_t udp_memory_allocated;
H
Hideo Aoki 已提交
126 127
EXPORT_SYMBOL(udp_memory_allocated);

128 129
#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
130

131 132 133 134 135 136 137 138 139 140 141
/* IPCB reference means this can not be used from early demux */
static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_udp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return true;
#endif
	return false;
}

142
static int udp_lib_lport_inuse(struct net *net, __u16 num,
143
			       const struct udp_hslot *hslot,
144
			       unsigned long *bitmap,
145
			       struct sock *sk, unsigned int log)
L
Linus Torvalds 已提交
146
{
147
	struct sock *sk2;
148
	kuid_t uid = sock_i_uid(sk);
149

150
	sk_for_each(sk2, &hslot->head) {
151 152
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
153
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
154 155 156
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
157
		    inet_rcv_saddr_equal(sk, sk2, true)) {
158 159 160 161 162 163 164 165 166 167 168
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
169
		}
170
	}
171 172 173
	return 0;
}

E
Eric Dumazet 已提交
174 175 176 177 178
/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
179
				struct udp_hslot *hslot2,
180
				struct sock *sk)
E
Eric Dumazet 已提交
181 182
{
	struct sock *sk2;
183
	kuid_t uid = sock_i_uid(sk);
E
Eric Dumazet 已提交
184 185 186
	int res = 0;

	spin_lock(&hslot2->lock);
187
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
188 189 190 191 192 193
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
194
		    inet_rcv_saddr_equal(sk, sk2, true)) {
195 196 197 198 199 200 201
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
E
Eric Dumazet 已提交
202 203
			break;
		}
204
	}
E
Eric Dumazet 已提交
205 206 207 208
	spin_unlock(&hslot2->lock);
	return res;
}

209
static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
210 211 212 213 214
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

215
	sk_for_each(sk2, &hslot->head) {
216 217 218 219 220 221 222
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
223
		    inet_rcv_saddr_equal(sk, sk2, false)) {
224 225 226 227
			return reuseport_add_sock(sk, sk2);
		}
	}

228
	return reuseport_alloc(sk);
229 230
}

231
/**
232
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
233 234 235
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
L
Lucas De Marchi 已提交
236
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
E
Eric Dumazet 已提交
237
 *                   with NULL address
238
 */
239
int udp_lib_get_port(struct sock *sk, unsigned short snum,
E
Eric Dumazet 已提交
240
		     unsigned int hash2_nulladdr)
241
{
242
	struct udp_hslot *hslot, *hslot2;
243
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
244
	int    error = 1;
245
	struct net *net = sock_net(sk);
L
Linus Torvalds 已提交
246

247
	if (!snum) {
E
Eric Dumazet 已提交
248
		int low, high, remaining;
249
		unsigned int rand;
250 251
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
252

253
		inet_get_local_port_range(net, &low, &high);
254
		remaining = (high - low) + 1;
255

256
		rand = prandom_u32();
257
		first = reciprocal_scale(rand, remaining) + low;
258 259 260
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
261
		rand = (rand | 1) * (udptable->mask + 1);
E
Eric Dumazet 已提交
262 263
		last = first + udptable->mask + 1;
		do {
264
			hslot = udp_hashslot(udptable, net, first);
265
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
266
			spin_lock_bh(&hslot->lock);
267
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
268
					    udptable->log);
269 270 271 272 273 274 275

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * give us randomization and full range coverage.
			 */
E
Eric Dumazet 已提交
276
			do {
277
				if (low <= snum && snum <= high &&
278
				    !test_bit(snum >> udptable->log, bitmap) &&
279
				    !inet_is_local_reserved_port(net, snum))
280 281 282 283
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
284
			cond_resched();
E
Eric Dumazet 已提交
285
		} while (++first != last);
286
		goto fail;
287
	} else {
288
		hslot = udp_hashslot(udptable, net, snum);
289
		spin_lock_bh(&hslot->lock);
E
Eric Dumazet 已提交
290 291 292 293 294 295 296 297 298 299 300
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

301
			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
E
Eric Dumazet 已提交
302 303 304
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
305
							     sk);
E
Eric Dumazet 已提交
306 307 308 309 310 311 312
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
313
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
314 315
			goto fail_unlock;
	}
316
found:
E
Eric Dumazet 已提交
317
	inet_sk(sk)->inet_num = snum;
318 319
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
L
Linus Torvalds 已提交
320
	if (sk_unhashed(sk)) {
321
		if (sk->sk_reuseport &&
322
		    udp_reuseport_add_sock(sk, hslot)) {
323 324 325 326 327 328
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

329
		sk_add_node_rcu(sk, &hslot->head);
E
Eric Dumazet 已提交
330
		hslot->count++;
331
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
332 333 334

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
335
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
336 337 338
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
339
		else
340 341
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
342 343
		hslot2->count++;
		spin_unlock(&hslot2->lock);
L
Linus Torvalds 已提交
344
	}
345
	sock_set_flag(sk, SOCK_RCU_FREE);
346
	error = 0;
347 348
fail_unlock:
	spin_unlock_bh(&hslot->lock);
L
Linus Torvalds 已提交
349
fail:
350 351
	return error;
}
E
Eric Dumazet 已提交
352
EXPORT_SYMBOL(udp_lib_get_port);
353

354
int udp_v4_get_port(struct sock *sk, unsigned short snum)
355
{
E
Eric Dumazet 已提交
356
	unsigned int hash2_nulladdr =
357
		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
E
Eric Dumazet 已提交
358
	unsigned int hash2_partial =
359
		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
E
Eric Dumazet 已提交
360

361
	/* precompute partial secondary hash */
E
Eric Dumazet 已提交
362
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
363
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
364 365
}

366 367
static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
368 369
			 __be32 daddr, unsigned short hnum,
			 int dif, int sdif, bool exact_dif)
370
{
371 372
	int score;
	struct inet_sock *inet;
373

374 375 376 377
	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;
378

379 380 381 382 383 384 385
	score = (sk->sk_family == PF_INET) ? 2 : 1;
	inet = inet_sk(sk);

	if (inet->inet_rcv_saddr) {
		if (inet->inet_rcv_saddr != daddr)
			return -1;
		score += 4;
386
	}
387 388 389 390 391 392 393 394 395 396 397 398 399

	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

400
	if (sk->sk_bound_dev_if || exact_dif) {
401 402 403
		bool dev_match = (sk->sk_bound_dev_if == dif ||
				  sk->sk_bound_dev_if == sdif);

P
Paolo Abeni 已提交
404
		if (!dev_match)
405
			return -1;
P
Paolo Abeni 已提交
406
		if (sk->sk_bound_dev_if)
407
			score += 4;
408
	}
409

410 411
	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
412 413 414
	return score;
}

415 416 417
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
418
{
419 420 421 422
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

423
	return __inet_ehashfn(laddr, lport, faddr, fport,
424
			      udp_ehash_secret + net_hash_mix(net));
425 426
}

427
/* called with rcu_read_lock() */
E
Eric Dumazet 已提交
428
static struct sock *udp4_lib_lookup2(struct net *net,
429 430 431 432 433
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum,
				     int dif, int sdif, bool exact_dif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
E
Eric Dumazet 已提交
434 435
{
	struct sock *sk, *result;
P
Paolo Abeni 已提交
436
	int score, badness;
437
	u32 hash = 0;
E
Eric Dumazet 已提交
438 439

	result = NULL;
440
	badness = 0;
441
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
442
		score = compute_score(sk, net, saddr, sport,
443
				      daddr, hnum, dif, sdif, exact_dif);
E
Eric Dumazet 已提交
444
		if (score > badness) {
P
Paolo Abeni 已提交
445
			if (sk->sk_reuseport) {
446 447
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
448
				result = reuseport_select_sock(sk, hash, skb,
449
							sizeof(struct udphdr));
450 451
				if (result)
					return result;
452
			}
453 454
			badness = score;
			result = sk;
E
Eric Dumazet 已提交
455 456 457 458 459
		}
	}
	return result;
}

460 461 462
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
463
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
464 465
		__be16 sport, __be32 daddr, __be16 dport, int dif,
		int sdif, struct udp_table *udptable, struct sk_buff *skb)
466
{
467
	struct sock *sk, *result;
468
	unsigned short hnum = ntohs(dport);
E
Eric Dumazet 已提交
469 470
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
471
	bool exact_dif = udp_lib_exact_dif_match(net, skb);
P
Paolo Abeni 已提交
472
	int score, badness;
473
	u32 hash = 0;
474

E
Eric Dumazet 已提交
475
	if (hslot->count > 10) {
476
		hash2 = ipv4_portaddr_hash(net, daddr, hnum);
E
Eric Dumazet 已提交
477 478 479 480 481 482
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
483
					  daddr, hnum, dif, sdif,
484
					  exact_dif, hslot2, skb);
E
Eric Dumazet 已提交
485
		if (!result) {
486
			unsigned int old_slot2 = slot2;
487
			hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
E
Eric Dumazet 已提交
488
			slot2 = hash2 & udptable->mask;
489 490 491 492
			/* avoid searching the same slot again. */
			if (unlikely(slot2 == old_slot2))
				return result;

E
Eric Dumazet 已提交
493 494 495 496
			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

497
			result = udp4_lib_lookup2(net, saddr, sport,
498
						  daddr, hnum, dif, sdif,
499
						  exact_dif, hslot2, skb);
E
Eric Dumazet 已提交
500 501 502
		}
		return result;
	}
503 504
begin:
	result = NULL;
505
	badness = 0;
506
	sk_for_each_rcu(sk, &hslot->head) {
507
		score = compute_score(sk, net, saddr, sport,
508
				      daddr, hnum, dif, sdif, exact_dif);
509
		if (score > badness) {
P
Paolo Abeni 已提交
510
			if (sk->sk_reuseport) {
511 512
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
513
				result = reuseport_select_sock(sk, hash, skb,
514
							sizeof(struct udphdr));
515 516
				if (result)
					return result;
517
			}
518 519
			result = sk;
			badness = score;
520 521 522 523
		}
	}
	return result;
}
524
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
525

526 527
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
528
						 struct udp_table *udptable)
529 530 531
{
	const struct iphdr *iph = ip_hdr(skb);

532
	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
533
				 iph->daddr, dport, inet_iif(skb),
534
				 inet_sdif(skb), udptable, skb);
535 536
}

537 538 539
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
540
	return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
541 542 543
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);

544 545 546 547
/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \
548 549
    IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) || \
    IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
550 551 552
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
553 554 555
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
556
			       dif, 0, &udp_table, NULL);
557
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
558 559
		sk = NULL;
	return sk;
560 561
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
562
#endif
563

S
Shawn Bohrer 已提交
564 565 566
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
567
				       int dif, int sdif, unsigned short hnum)
S
Shawn Bohrer 已提交
568 569 570 571 572 573 574 575 576
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
577 578
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
	     sk->sk_bound_dev_if != sdif))
S
Shawn Bohrer 已提交
579
		return false;
580
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
S
Shawn Bohrer 已提交
581 582 583 584
		return false;
	return true;
}

585 586 587 588 589 590 591 592 593 594 595
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */

596
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
597 598
{
	struct inet_sock *inet;
599
	const struct iphdr *iph = (const struct iphdr *)skb->data;
E
Eric Dumazet 已提交
600
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
601 602 603 604 605
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;
606
	struct net *net = dev_net(skb->dev);
607

608
	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
609 610
			       iph->saddr, uh->source, skb->dev->ifindex, 0,
			       udptable, NULL);
611
	if (!sk) {
E
Eric Dumazet 已提交
612
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
633
			ipv4_sk_update_pmtu(skb, sk, info);
634 635 636 637 638 639 640 641 642 643 644 645 646
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
647 648
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
649
		goto out;
650 651 652 653 654 655 656 657 658
	}

	/*
	 *      RFC1122: OK.  Passes ICMP errors back to application, as per
	 *	4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
659
	} else
E
Eric Dumazet 已提交
660
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
661

662 663 664
	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
665
	return;
666 667 668 669
}

void udp_err(struct sk_buff *skb, u32 info)
{
670
	__udp4_lib_err(skb, info, &udp_table);
671 672 673 674 675
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
676
void udp_flush_pending_frames(struct sock *sk)
677 678 679 680 681 682 683 684 685
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}
686
EXPORT_SYMBOL(udp_flush_pending_frames);
687 688

/**
H
Herbert Xu 已提交
689
 * 	udp4_hwcsum  -  handle outgoing HW checksumming
690 691
 * 	@skb: 	sk_buff containing the filled-in UDP header
 * 	        (checksum field must be zeroed out)
H
Herbert Xu 已提交
692 693
 *	@src:	source IP address
 *	@dst:	destination IP address
694
 */
695
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
696 697
{
	struct udphdr *uh = udp_hdr(skb);
H
Herbert Xu 已提交
698 699 700
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
701 702
	__wsum csum = 0;

703
	if (!skb_has_frag_list(skb)) {
704 705 706 707 708
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
H
Herbert Xu 已提交
709 710
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
711
	} else {
712 713
		struct sk_buff *frags;

714 715 716 717 718
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
719
		skb_walk_frags(skb, frags) {
H
Herbert Xu 已提交
720 721
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
722
		}
723

H
Herbert Xu 已提交
724
		csum = skb_checksum(skb, offset, hlen, csum);
725 726 727 728 729 730 731
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
732
EXPORT_SYMBOL_GPL(udp4_hwcsum);
733

734 735 736 737 738 739 740 741
/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

742
	if (nocheck) {
743
		uh->check = 0;
744
	} else if (skb_is_gso(skb)) {
745
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
746 747 748 749 750
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
751
	} else {
752 753 754 755 756 757 758 759
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
EXPORT_SYMBOL(udp_set_csum);

760 761
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
			struct inet_cork *cork)
762
{
H
Herbert Xu 已提交
763
	struct sock *sk = skb->sk;
764 765 766 767
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
H
Herbert Xu 已提交
768 769
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
770 771 772 773 774 775
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
H
Herbert Xu 已提交
776
	uh->source = inet->inet_sport;
777
	uh->dest = fl4->fl4_dport;
H
Herbert Xu 已提交
778
	uh->len = htons(len);
779 780
	uh->check = 0;

781 782 783 784 785 786 787 788
	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + cork->gso_size > cork->fragsize)
			return -EINVAL;
		if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS)
			return -EINVAL;
789 790
		if (sk->sk_no_check_tx)
			return -EINVAL;
791 792
		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
		    dst_xfrm(skb_dst(skb)))
793 794 795 796
			return -EIO;

		skb_shinfo(skb)->gso_size = cork->gso_size;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
797 798
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
							 cork->gso_size);
799
		goto csum_partial;
800 801
	}

802
	if (is_udplite)  				 /*     UDP-Lite      */
H
Herbert Xu 已提交
803
		csum = udplite_csum(skb);
804

805
	else if (sk->sk_no_check_tx) {			 /* UDP csum off */
806 807 808 809 810

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
811
csum_partial:
812

813
		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
814 815
		goto send;

H
Herbert Xu 已提交
816 817
	} else
		csum = udp_csum(skb);
818 819

	/* add protocol-dependent pseudo-header */
820
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
E
Eric Dumazet 已提交
821
				      sk->sk_protocol, csum);
822 823 824 825
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
E
Eric Dumazet 已提交
826
	err = ip_send_skb(sock_net(sk), skb);
E
Eric Dumazet 已提交
827 828
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
829 830
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
E
Eric Dumazet 已提交
831 832 833
			err = 0;
		}
	} else
834 835
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
H
Herbert Xu 已提交
836 837 838 839 840 841
	return err;
}

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
842
int udp_push_pending_frames(struct sock *sk)
H
Herbert Xu 已提交
843 844 845
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
D
David S. Miller 已提交
846
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
H
Herbert Xu 已提交
847 848 849
	struct sk_buff *skb;
	int err = 0;

850
	skb = ip_finish_skb(sk, fl4);
H
Herbert Xu 已提交
851 852 853
	if (!skb)
		goto out;

854
	err = udp_send_skb(skb, fl4, &inet->cork.base);
H
Herbert Xu 已提交
855

856 857 858 859 860
out:
	up->len = 0;
	up->pending = 0;
	return err;
}
861
EXPORT_SYMBOL(udp_push_pending_frames);
862

W
Willem de Bruijn 已提交
863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899
static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size)
{
	switch (cmsg->cmsg_type) {
	case UDP_SEGMENT:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u16)))
			return -EINVAL;
		*gso_size = *(__u16 *)CMSG_DATA(cmsg);
		return 0;
	default:
		return -EINVAL;
	}
}

int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
{
	struct cmsghdr *cmsg;
	bool need_ip = false;
	int err;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_UDP) {
			need_ip = true;
			continue;
		}

		err = __udp_cmsg_send(cmsg, gso_size);
		if (err)
			return err;
	}

	return need_ip;
}
EXPORT_SYMBOL_GPL(udp_cmsg_send);

900
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
901 902 903
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
A
Andrey Ignatov 已提交
904
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
905
	struct flowi4 fl4_stack;
D
David S. Miller 已提交
906
	struct flowi4 *fl4;
907 908 909 910 911 912 913 914 915 916 917
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
H
Herbert Xu 已提交
918
	struct sk_buff *skb;
919
	struct ip_options_data opt_copy;
920 921 922 923 924 925 926 927

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

E
Eric Dumazet 已提交
928
	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
929 930 931
		return -EOPNOTSUPP;

	ipc.opt = NULL;
932
	ipc.tx_flags = 0;
933 934
	ipc.ttl = 0;
	ipc.tos = -1;
935

H
Herbert Xu 已提交
936 937
	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

938
	fl4 = &inet->cork.fl.u.ip4;
939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
A
Andrey Ignatov 已提交
959
	if (usin) {
960 961 962 963 964 965 966 967 968 969 970 971 972 973
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
E
Eric Dumazet 已提交
974 975
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
976 977 978 979 980 981
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

982 983
	ipc.sockc.tsflags = sk->sk_tsflags;
	ipc.addr = inet->inet_saddr;
984
	ipc.oif = sk->sk_bound_dev_if;
985
	ipc.gso_size = up->gso_size;
986

987
	if (msg->msg_controllen) {
W
Willem de Bruijn 已提交
988 989 990 991 992
		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
		if (err > 0)
			err = ip_cmsg_send(sk, msg, &ipc,
					   sk->sk_family == AF_INET6);
		if (unlikely(err < 0)) {
993
			kfree(ipc.opt);
994
			return err;
995
		}
996 997 998 999
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}
1012

A
Andrey Ignatov 已提交
1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028
	if (cgroup_bpf_enabled && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
					    (struct sockaddr *)usin, &ipc.addr);
		if (err)
			goto out_free;
		if (usin) {
			if (usin->sin_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_free;
			}
			daddr = usin->sin_addr.s_addr;
			dport = usin->sin_port;
		}
	}

1029 1030 1031
	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

1032 1033
	sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);

1034
	if (ipc.opt && ipc.opt->opt.srr) {
1035 1036 1037 1038
		if (!daddr) {
			err = -EINVAL;
			goto out_free;
		}
1039
		faddr = ipc.opt->opt.faddr;
1040 1041
		connected = 0;
	}
1042
	tos = get_rttos(&ipc, inet);
1043 1044
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
1045
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
1046 1047 1048 1049 1050 1051 1052 1053 1054 1055
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
1056
	} else if (!ipc.oif) {
1057
		ipc.oif = inet->uc_index;
1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070
	} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
		/* oif is set, packet is to local broadcast and
		 * and uc_index is set. oif is most likely set
		 * by sk_bound_dev_if. If uc_index != oif check if the
		 * oif is an L3 master and uc_index is an L3 slave.
		 * If so, we want to allow the send using the uc_index.
		 */
		if (ipc.oif != inet->uc_index &&
		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
							      inet->uc_index)) {
			ipc.oif = inet->uc_index;
		}
	}
1071 1072

	if (connected)
E
Eric Dumazet 已提交
1073
		rt = (struct rtable *)sk_dst_check(sk, 0);
1074

1075
	if (!rt) {
1076
		struct net *net = sock_net(sk);
D
David Ahern 已提交
1077
		__u8 flow_flags = inet_sk_flowi_flags(sk);
1078

1079
		fl4 = &fl4_stack;
D
David Ahern 已提交
1080

1081
		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
1082
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
D
David Ahern 已提交
1083
				   flow_flags,
1084 1085
				   faddr, saddr, dport, inet->inet_sport,
				   sk->sk_uid);
1086

1087 1088
		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
1089 1090
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
1091
			rt = NULL;
1092
			if (err == -ENETUNREACH)
1093
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
1094 1095 1096 1097 1098 1099 1100 1101
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
1102
			sk_dst_set(sk, dst_clone(&rt->dst));
1103 1104 1105 1106 1107 1108
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

1109
	saddr = fl4->saddr;
1110
	if (!ipc.addr)
1111
		daddr = ipc.addr = fl4->daddr;
1112

H
Herbert Xu 已提交
1113 1114
	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
W
Willem de Bruijn 已提交
1115 1116
		struct inet_cork cork;

1117
		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
H
Herbert Xu 已提交
1118
				  sizeof(struct udphdr), &ipc, &rt,
W
Willem de Bruijn 已提交
1119
				  &cork, msg->msg_flags);
H
Herbert Xu 已提交
1120
		err = PTR_ERR(skb);
1121
		if (!IS_ERR_OR_NULL(skb))
1122
			err = udp_send_skb(skb, fl4, &cork);
H
Herbert Xu 已提交
1123 1124 1125
		goto out;
	}

1126 1127 1128 1129 1130 1131
	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

1132
		net_dbg_ratelimited("socket already corked\n");
1133 1134 1135 1136 1137 1138
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
D
David S. Miller 已提交
1139 1140 1141
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
1142 1143
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
1144 1145 1146 1147
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
1148
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
1149 1150
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1151 1152 1153 1154 1155 1156 1157 1158 1159 1160
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
1161
out_free:
1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1174 1175
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
1176 1177 1178 1179
	}
	return err;

do_confirm:
1180 1181
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4->daddr);
1182 1183 1184 1185 1186
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
E
Eric Dumazet 已提交
1187
EXPORT_SYMBOL(udp_sendmsg);
1188 1189 1190 1191

int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
1192
	struct inet_sock *inet = inet_sk(sk);
1193 1194 1195
	struct udp_sock *up = udp_sk(sk);
	int ret;

1196 1197 1198
	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

1199 1200 1201 1202 1203 1204 1205
	if (!up->pending) {
		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
1206
		ret = udp_sendmsg(sk, &msg, 0);
1207 1208 1209 1210 1211 1212 1213 1214 1215
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

1216
		net_dbg_ratelimited("cork failed\n");
1217 1218 1219
		return -EINVAL;
	}

1220 1221
	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}

1242 1243
#define UDP_SKB_IS_STATELESS 0x80000000

1244 1245
static void udp_set_dev_scratch(struct sk_buff *skb)
{
1246
	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);
1247 1248

	BUILD_BUG_ON(sizeof(struct udp_dev_scratch) > sizeof(long));
1249 1250
	scratch->_tsize_state = skb->truesize;
#if BITS_PER_LONG == 64
1251 1252 1253
	scratch->len = skb->len;
	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
	scratch->is_linear = !skb_is_nonlinear(skb);
1254
#endif
P
Paolo Abeni 已提交
1255 1256 1257 1258 1259
	/* all head states execept sp (dst, sk, nf) are always cleared by
	 * udp_rcv() and we need to preserve secpath, if present, to eventually
	 * process IP_CMSG_PASSSEC at recvmsg() time
	 */
	if (likely(!skb_sec_path(skb)))
1260
		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
1261 1262 1263 1264
}

static int udp_skb_truesize(struct sk_buff *skb)
{
1265
	return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
1266 1267
}

1268
static bool udp_skb_has_head_state(struct sk_buff *skb)
1269
{
1270
	return !(udp_skb_scratch(skb)->_tsize_state & UDP_SKB_IS_STATELESS);
1271 1272
}

1273
/* fully reclaim rmem/fwd memory allocated for skb */
1274 1275
static void udp_rmem_release(struct sock *sk, int size, int partial,
			     bool rx_queue_lock_held)
1276
{
1277
	struct udp_sock *up = udp_sk(sk);
1278
	struct sk_buff_head *sk_queue;
1279 1280
	int amt;

1281 1282 1283
	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
1284
		if (size < (sk->sk_rcvbuf >> 2))
1285 1286 1287 1288 1289 1290
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

1291 1292 1293
	/* acquire the sk_receive_queue for fwd allocated memory scheduling,
	 * if the called don't held it already
	 */
1294
	sk_queue = &sk->sk_receive_queue;
1295 1296 1297
	if (!rx_queue_lock_held)
		spin_lock(&sk_queue->lock);

1298

1299 1300 1301 1302 1303 1304
	sk->sk_forward_alloc += size;
	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
	sk->sk_forward_alloc -= amt;

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
1305 1306

	atomic_sub(size, &sk->sk_rmem_alloc);
1307 1308 1309 1310

	/* this can save us from acquiring the rx queue lock on next receive */
	skb_queue_splice_tail_init(sk_queue, &up->reader_queue);

1311 1312
	if (!rx_queue_lock_held)
		spin_unlock(&sk_queue->lock);
1313 1314
}

1315
/* Note: called with reader_queue.lock held.
1316 1317 1318 1319
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
 * This avoids a cache line miss while receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
1320
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
1321
{
1322 1323
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
1324
}
1325
EXPORT_SYMBOL(udp_skb_destructor);
1326

1327
/* as above, but the caller held the rx queue lock, too */
1328
static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
1329
{
1330 1331
	prefetch(&skb->data);
	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
1332 1333
}

E
Eric Dumazet 已提交
1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358
/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylock can be allocated on a per cpu manner, instead of a
 * per socket one (that would consume a cache line per socket)
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;

static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}

1359 1360 1361 1362
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, delta, amt, err = -ENOMEM;
E
Eric Dumazet 已提交
1363
	spinlock_t *busy = NULL;
1364
	int size;
1365 1366 1367 1368 1369

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
1370
	if (rmem > sk->sk_rcvbuf)
1371 1372
		goto drop;

1373 1374 1375 1376 1377 1378
	/* Under mem pressure, it might be helpful to help udp_recvmsg()
	 * having linear skbs :
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
E
Eric Dumazet 已提交
1379
	if (rmem > (sk->sk_rcvbuf >> 1)) {
1380
		skb_condense(skb);
E
Eric Dumazet 已提交
1381 1382 1383

		busy = busylock_acquire(sk);
	}
1384
	size = skb->truesize;
1385
	udp_set_dev_scratch(skb);
1386

1387 1388 1389 1390
	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
1391
	if (rmem > (size + sk->sk_rcvbuf))
1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

1409 1410 1411
	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
1412 1413 1414 1415 1416 1417 1418 1419
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

E
Eric Dumazet 已提交
1420
	busylock_release(busy);
1421 1422 1423 1424 1425 1426 1427
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
E
Eric Dumazet 已提交
1428
	busylock_release(busy);
1429 1430 1431 1432
	return err;
}
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);

1433
void udp_destruct_sock(struct sock *sk)
1434 1435
{
	/* reclaim completely the forward allocated memory */
1436
	struct udp_sock *up = udp_sk(sk);
1437 1438 1439
	unsigned int total = 0;
	struct sk_buff *skb;

1440 1441
	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
	while ((skb = __skb_dequeue(&up->reader_queue)) != NULL) {
1442 1443 1444
		total += skb->truesize;
		kfree_skb(skb);
	}
1445
	udp_rmem_release(sk, total, 0, true);
1446

1447 1448
	inet_sock_destruct(sk);
}
1449
EXPORT_SYMBOL_GPL(udp_destruct_sock);
1450 1451 1452

int udp_init_sock(struct sock *sk)
{
1453
	skb_queue_head_init(&udp_sk(sk)->reader_queue);
1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}
EXPORT_SYMBOL_GPL(udp_init_sock);

void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}
P
Paolo Abeni 已提交
1467

1468 1469 1470
	if (!skb_unref(skb))
		return;

1471 1472
	/* In the more common cases we cleared the head states previously,
	 * see __udp_queue_rcv_skb().
1473
	 */
1474
	if (unlikely(udp_skb_has_head_state(skb)))
1475
		skb_release_head_state(skb);
1476
	__consume_stateless_skb(skb);
1477 1478 1479
}
EXPORT_SYMBOL_GPL(skb_consume_udp);

1480 1481 1482 1483 1484 1485
static struct sk_buff *__first_packet_length(struct sock *sk,
					     struct sk_buff_head *rcvq,
					     int *total)
{
	struct sk_buff *skb;

P
Paolo Abeni 已提交
1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502
	while ((skb = skb_peek(rcvq)) != NULL) {
		if (udp_lib_checksum_complete(skb)) {
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			atomic_inc(&sk->sk_drops);
			__skb_unlink(skb, rcvq);
			*total += skb->truesize;
			kfree_skb(skb);
		} else {
			/* the csum related bits could be changed, refresh
			 * the scratch area
			 */
			udp_set_dev_scratch(skb);
			break;
		}
1503 1504 1505 1506
	}
	return skb;
}

E
Eric Dumazet 已提交
1507 1508 1509 1510 1511
/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
1512
 *	Returns the length of found skb, or -1 if none is found.
E
Eric Dumazet 已提交
1513
 */
1514
static int first_packet_length(struct sock *sk)
E
Eric Dumazet 已提交
1515
{
1516 1517
	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
E
Eric Dumazet 已提交
1518
	struct sk_buff *skb;
1519
	int total = 0;
1520
	int res;
E
Eric Dumazet 已提交
1521 1522

	spin_lock_bh(&rcvq->lock);
1523 1524 1525 1526 1527 1528 1529
	skb = __first_packet_length(sk, rcvq, &total);
	if (!skb && !skb_queue_empty(sk_queue)) {
		spin_lock(&sk_queue->lock);
		skb_queue_splice_tail_init(sk_queue, rcvq);
		spin_unlock(&sk_queue->lock);

		skb = __first_packet_length(sk, rcvq, &total);
E
Eric Dumazet 已提交
1530
	}
1531
	res = skb ? skb->len : -1;
1532
	if (total)
1533
		udp_rmem_release(sk, total, 1, false);
E
Eric Dumazet 已提交
1534 1535 1536 1537
	spin_unlock_bh(&rcvq->lock);
	return res;
}

L
Linus Torvalds 已提交
1538 1539 1540
/*
 *	IOCTL requests applicable to the UDP protocol
 */
1541

L
Linus Torvalds 已提交
1542 1543
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
1544 1545
	switch (cmd) {
	case SIOCOUTQ:
L
Linus Torvalds 已提交
1546
	{
1547 1548
		int amount = sk_wmem_alloc_get(sk);

1549 1550
		return put_user(amount, (int __user *)arg);
	}
L
Linus Torvalds 已提交
1551

1552 1553
	case SIOCINQ:
	{
1554
		int amount = max_t(int, 0, first_packet_length(sk));
1555 1556 1557

		return put_user(amount, (int __user *)arg);
	}
L
Linus Torvalds 已提交
1558

1559 1560
	default:
		return -ENOIOCTLCMD;
L
Linus Torvalds 已提交
1561
	}
1562 1563

	return 0;
L
Linus Torvalds 已提交
1564
}
E
Eric Dumazet 已提交
1565
EXPORT_SYMBOL(udp_ioctl);
L
Linus Torvalds 已提交
1566

1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
			       int noblock, int *peeked, int *off, int *err)
{
	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
	struct sk_buff_head *queue;
	struct sk_buff *last;
	long timeo;
	int error;

	queue = &udp_sk(sk)->reader_queue;
	flags |= noblock ? MSG_DONTWAIT : 0;
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		struct sk_buff *skb;

		error = sock_error(sk);
		if (error)
			break;

		error = -EAGAIN;
		*peeked = 0;
		do {
			spin_lock_bh(&queue->lock);
			skb = __skb_try_recv_from_queue(sk, queue, flags,
							udp_skb_destructor,
1592
							peeked, off, err,
1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603
							&last);
			if (skb) {
				spin_unlock_bh(&queue->lock);
				return skb;
			}

			if (skb_queue_empty(sk_queue)) {
				spin_unlock_bh(&queue->lock);
				goto busy_check;
			}

1604 1605 1606 1607 1608
			/* refill the reader queue and walk it again
			 * keep both queues locked to avoid re-acquiring
			 * the sk_receive_queue lock if fwd memory scheduling
			 * is needed.
			 */
1609 1610 1611 1612
			spin_lock(&sk_queue->lock);
			skb_queue_splice_tail_init(sk_queue, queue);

			skb = __skb_try_recv_from_queue(sk, queue, flags,
1613
							udp_skb_dtor_locked,
1614
							peeked, off, err,
1615
							&last);
1616
			spin_unlock(&sk_queue->lock);
1617
			spin_unlock_bh(&queue->lock);
1618
			if (skb)
1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637
				return skb;

busy_check:
			if (!sk_can_busy_loop(sk))
				break;

			sk_busy_loop(sk, flags & MSG_DONTWAIT);
		} while (!skb_queue_empty(sk_queue));

		/* sk_queue is empty, reader_queue may contain peeked packets */
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &error, &timeo,
					      (struct sk_buff *)sk_queue));

	*err = error;
	return NULL;
}
EXPORT_SYMBOL_GPL(__skb_recv_udp);

1638 1639 1640 1641 1642
/*
 * 	This should be easy, if there is something there we
 * 	return it, otherwise we block.
 */

1643 1644
int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
		int flags, int *addr_len)
1645 1646
{
	struct inet_sock *inet = inet_sk(sk);
1647
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
1648
	struct sk_buff *skb;
1649
	unsigned int ulen, copied;
1650
	int peeked, peeking, off;
1651 1652
	int err;
	int is_udplite = IS_UDPLITE(sk);
1653
	bool checksum_valid = false;
1654 1655

	if (flags & MSG_ERRQUEUE)
1656
		return ip_recv_error(sk, msg, len, addr_len);
1657 1658

try_again:
1659 1660
	peeking = flags & MSG_PEEK;
	off = sk_peek_offset(sk, flags);
1661
	skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
1662
	if (!skb)
1663
		return err;
1664

1665
	ulen = udp_skb_len(skb);
1666
	copied = len;
1667 1668
	if (copied > ulen - off)
		copied = ulen - off;
1669
	else if (copied < ulen)
1670 1671 1672 1673 1674 1675 1676 1677
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

1678 1679
	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
1680 1681
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
1682
		if (!checksum_valid)
1683 1684 1685
			goto csum_copy_err;
	}

1686 1687 1688 1689 1690 1691
	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
1692
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
1693 1694 1695 1696 1697

		if (err == -EINVAL)
			goto csum_copy_err;
	}

1698
	if (unlikely(err)) {
1699 1700
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
1701 1702
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
1703
		}
1704
		kfree_skb(skb);
1705
		return err;
1706
	}
1707 1708

	if (!peeked)
1709 1710
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);
1711

1712
	sock_recv_ts_and_drops(msg, sk, skb);
1713 1714

	/* Copy the address. */
E
Eric Dumazet 已提交
1715
	if (sin) {
1716 1717 1718 1719
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
1720
		*addr_len = sizeof(*sin);
1721 1722
	}
	if (inet->cmsg_flags)
1723
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);
1724

1725
	err = copied;
1726 1727 1728
	if (flags & MSG_TRUNC)
		err = ulen;

1729
	skb_consume_udp(sk, skb, peeking ? -err : err);
1730 1731 1732
	return err;

csum_copy_err:
1733 1734
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
1735 1736
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
1737
	}
1738
	kfree_skb(skb);
1739

1740 1741
	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
1742
	msg->msg_flags &= ~MSG_TRUNC;
1743 1744 1745
	goto try_again;
}

A
Andrey Ignatov 已提交
1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	/* This check is replicated from __ip4_datagram_connect() and
	 * intended to prevent BPF program called below from accessing bytes
	 * that are out of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
}
EXPORT_SYMBOL(udp_pre_connect);

int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}
EXPORT_SYMBOL(__udp_disconnect);

int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(udp_disconnect);

void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}
EXPORT_SYMBOL(udp_lib_unhash);

/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}
EXPORT_SYMBOL(udp_lib_rehash);

static void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}

static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

static DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
void udp_encap_enable(void)
{
	static_branch_enable(&udp_encap_needed_key);
}
EXPORT_SYMBOL(udp_encap_enable);
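
/* Editorial note (illustrative sketch, not part of the original sources):
 * the static key above is flipped once an encapsulation type is configured
 * on a UDP socket, typically via the UDP_ENCAP option handled in
 * udp_lib_setsockopt() below.  From userspace, e.g. an IKE daemon doing
 * ESP-in-UDP might do:
 *
 *	#include <netinet/in.h>
 *	#include <linux/udp.h>
 *
 *	int type = UDP_ENCAP_ESPINUDP;
 *	setsockopt(fd, IPPROTO_UDP, UDP_ENCAP, &type, sizeof(type));
 *
 * "fd" is assumed to be an AF_INET/SOCK_DGRAM socket; IPPROTO_UDP is used
 * here as the option level (numerically identical to SOL_UDP).
 */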

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * 	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	if (dst_hold_safe(dst)) {
		old = xchg(&sk->sk_rx_dst, dst);
		dst_release(old);
		return old != dst;
	}
	return false;
}
EXPORT_SYMBOL(udp_sk_rx_dst_set);

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr  *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, sdif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}

/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the udp header, and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;

		if (UDP_SKB_CB(skb)->partial_cov) {
			skb->csum = inet_compute_pseudo(skb, proto);
			return 0;
		}
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * force to int.
	 */
	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							 inet_compute_pseudo);
}

/*
 *	All we need to do is get the socket, and then do a checksum.
 */

int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh   = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);
		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		int ret;

		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
						 inet_compute_pseudo);

		ret = udp_queue_rcv_skb(sk, skb);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * We got a UDP packet on a port we are not listening on.
	 * Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source),
			    ulen, skb->len,
			    &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
			    ulen);
	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
						  __be16 loc_port, __be32 loc_addr,
						  __be16 rmt_port, __be32 rmt_addr,
						  int dif, int sdif)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(loc_port);
	unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
	struct udp_hslot *hslot = &udp_table.hash[slot];

	/* Do not bother scanning a too big list */
	if (hslot->count > 10)
		return NULL;

	result = NULL;
	sk_for_each_rcu(sk, &hslot->head) {
		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
					rmt_port, rmt_addr, dif, sdif, hnum)) {
			if (result)
				return NULL;
			result = sk;
		}
	}

	return result;
}

/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
					    __be16 loc_port, __be32 loc_addr,
					    __be16 rmt_port, __be32 rmt_addr,
					    int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (INET_MATCH(sk, net, acookie, rmt_addr,
			       loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

int udp_v4_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct in_device *in_dev = NULL;
	const struct iphdr *iph;
	const struct udphdr *uh;
	struct sock *sk = NULL;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet_sdif(skb);
	int ours;

	/* validate the packet */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
		return 0;

	iph = ip_hdr(skb);
	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_MULTICAST) {
		in_dev = __in_dev_get_rcu(skb->dev);

		if (!in_dev)
			return 0;

		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
				       iph->protocol);
		if (!ours)
			return 0;

		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
						   uh->source, iph->saddr,
						   dif, sdif);
	} else if (skb->pkt_type == PACKET_HOST) {
		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
					     uh->source, iph->saddr, dif, sdif);
	}

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return 0;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, 0);
	if (dst) {
		u32 itag = 0;

		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);

		/* for unconnected multicast sockets we need to validate
		 * the source on each packet
		 */
		if (!inet_sk(sk)->inet_daddr && in_dev)
			return ip_mc_validate_source(skb, iph->daddr,
						     iph->saddr, iph->tos,
						     skb->dev, in_dev, &itag);
	}
	return 0;
}

int udp_rcv(struct sk_buff *skb)
{
	return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

void udp_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);
	bool slow = lock_sock_fast(sk);
	udp_flush_pending_frames(sk);
	unlock_sock_fast(sk, slow);
	if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
		void (*encap_destroy)(struct sock *sk);
		encap_destroy = READ_ONCE(up->encap_destroy);
		if (encap_destroy)
			encap_destroy(sk);
	}
}

/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, unsigned int optlen,
		       int (*push_pending_frames)(struct sock *))
{
	struct udp_sock *up = udp_sk(sk);
	int val, valbool;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case UDP_CORK:
		if (val != 0) {
			up->corkflag = 1;
		} else {
			up->corkflag = 0;
			lock_sock(sk);
			push_pending_frames(sk);
			release_sock(sk);
		}
		break;

	case UDP_ENCAP:
		switch (val) {
		case 0:
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			up->encap_rcv = xfrm4_udp_encap_rcv;
			/* FALLTHROUGH */
		case UDP_ENCAP_L2TPINUDP:
			up->encap_type = val;
			udp_encap_enable();
			break;
		default:
			err = -ENOPROTOOPT;
			break;
		}
		break;

	case UDP_NO_CHECK6_TX:
		up->no_check6_tx = valbool;
		break;

	case UDP_NO_CHECK6_RX:
		up->no_check6_rx = valbool;
		break;

	case UDP_SEGMENT:
		if (val < 0 || val > USHRT_MAX)
			return -EINVAL;
		up->gso_size = val;
		break;

	/*
	 * 	UDP-Lite's partial checksum coverage (RFC 3828).
	 */
	/* The sender sets actual checksum coverage length via this option.
	 * The case coverage > packet length is handled by send module. */
	case UDPLITE_SEND_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcslen = val;
		up->pcflag |= UDPLITE_SEND_CC;
		break;

	/* The receiver specifies a minimum checksum coverage value. To make
	 * sense, this should be set to at least 8 (as done below). If zero is
	 * used, this again means full checksum coverage.                     */
	case UDPLITE_RECV_CSCOV:
		if (!is_udplite)         /* Disable the option on UDP sockets */
			return -ENOPROTOOPT;
		if (val != 0 && val < 8) /* Avoid silly minimal values.       */
			val = 8;
		else if (val > USHRT_MAX)
			val = USHRT_MAX;
		up->pcrlen = val;
		up->pcflag |= UDPLITE_RECV_CC;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
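
/* Editorial note (illustrative sketch, not part of the original sources):
 * two of the options handled above, as seen from userspace.  UDP-Lite
 * partial checksum coverage (RFC 3828), on an IPPROTO_UDPLITE socket:
 *
 *	int cov = 20;	// cover only the first 20 bytes (UDP header included)
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 *
 * and UDP_SEGMENT (GSO), on a plain UDP socket, where the value is the
 * payload size each segment of a large send is cut into:
 *
 *	int gso = 1400;
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso, sizeof(gso));
 *
 * Constants come from <linux/udp.h> (or the matching libc headers); "fd"
 * is assumed to be a suitably created SOCK_DGRAM socket.
 */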

int udp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_push_pending_frames);
	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udp_lib_getsockopt(struct sock *sk, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	struct udp_sock *up = udp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case UDP_CORK:
		val = up->corkflag;
		break;

	case UDP_ENCAP:
		val = up->encap_type;
		break;

	case UDP_NO_CHECK6_TX:
		val = up->no_check6_tx;
		break;

	case UDP_NO_CHECK6_RX:
		val = up->no_check6_rx;
		break;

	case UDP_SEGMENT:
		val = up->gso_size;
		break;

	/* The following two cannot be changed on UDP sockets, the return is
	 * always 0 (which corresponds to the full checksum coverage of UDP). */
	case UDPLITE_SEND_CSCOV:
		val = up->pcslen;
		break;

	case UDPLITE_RECV_CSCOV:
		val = up->pcrlen;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);

int udp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
				 char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
 * 	udp_poll - wait for a UDP event.
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets. If an application is using a blocking fd and a
 *	packet with a checksum error is in the queue, select() could
 *	indicate that data is available while the subsequent read blocks.
 *	Add special case code to work around these arguably broken
 *	applications.
 */
__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;

	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Check for false positives due to checksum errors */
	if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
		mask &= ~(EPOLLIN | EPOLLRDNORM);

	return mask;
}
EXPORT_SYMBOL(udp_poll);

int udp_abort(struct sock *sk, int err)
{
	lock_sock(sk);

	sk->sk_err = err;
	sk->sk_error_report(sk);
	__udp_disconnect(sk, 0);

	release_sock(sk);

	return 0;
}
EXPORT_SYMBOL_GPL(udp_abort);

struct proto udp_prot = {
	.name			= "UDP",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udp_pre_connect,
	.connect		= ip4_datagram_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udp_init_sock,
	.destroy		= udp_destroy_sock,
	.setsockopt		= udp_setsockopt,
	.getsockopt		= udp_getsockopt,
	.sendmsg		= udp_sendmsg,
	.recvmsg		= udp_recvmsg,
	.sendpage		= udp_sendpage,
	.release_cb		= ip4_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v4_rehash,
	.get_port		= udp_v4_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp_sock),
	.h.udp_table		= &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_udp_setsockopt,
	.compat_getsockopt	= compat_udp_getsockopt,
#endif
	.diag_destroy		= udp_abort,
};
EXPORT_SYMBOL(udp_prot);

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket <= state->udp_table->mask;
	     ++state->bucket) {
		struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];

		if (hlist_empty(&hslot->head))
			continue;

		spin_lock_bh(&hslot->lock);
		sk_for_each(sk, &hslot->head) {
			if (!net_eq(sock_net(sk), net))
				continue;
			if (sk->sk_family == state->family)
				goto found;
		}
		spin_unlock_bh(&hslot->lock);
	}
	sk = NULL;
found:
	return sk;
}

static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
	struct udp_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));

	if (!sk) {
		if (state->bucket <= state->udp_table->mask)
			spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
		return udp_get_first(seq, state->bucket + 1);
	}
	return sk;
}

static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = udp_get_first(seq, 0);

	if (sk)
		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct udp_iter_state *state = seq->private;
	state->bucket = MAX_UDP_PORTS;

	return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}

static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = udp_get_idx(seq, 0);
	else
		sk = udp_get_next(seq, v);

	++*pos;
	return sk;
}

static void udp_seq_stop(struct seq_file *seq, void *v)
{
	struct udp_iter_state *state = seq->private;

	if (state->bucket <= state->udp_table->mask)
		spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}

int udp_seq_open(struct inode *inode, struct file *file)
{
	struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct udp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct udp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->udp_table		= afinfo->udp_table;
	return err;
}
EXPORT_SYMBOL(udp_seq_open);

/* ------------------------------------------------------------------------ */
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
{
	struct proc_dir_entry *p;
	int rc = 0;

	afinfo->seq_ops.start		= udp_seq_start;
	afinfo->seq_ops.next		= udp_seq_next;
	afinfo->seq_ops.stop		= udp_seq_stop;

	p = proc_create_data(afinfo->name, 0444, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(udp_proc_register);

void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(udp_proc_unregister);

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
		int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src  = inet->inet_rcv_saddr;
	__u16 destp	  = ntohs(inet->inet_dport);
	__u16 srcp	  = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		sk_rmem_alloc_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		refcount_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops));
}

int udp4_seq_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, 127);
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode ref pointer drops");
	else {
		struct udp_iter_state *state = seq->private;

		udp4_format_sock(v, seq, state->bucket);
	}
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations udp_afinfo_seq_fops = {
	.open     = udp_seq_open,
	.read     = seq_read,
	.llseek   = seq_lseek,
	.release  = seq_release_net
};

/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
	.name		= "udp",
	.family		= AF_INET,
	.udp_table	= &udp_table,
	.seq_fops	= &udp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= udp4_seq_show,
	},
};

static int __net_init udp4_proc_init_net(struct net *net)
{
	return udp_proc_register(net, &udp4_seq_afinfo);
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
	udp_proc_unregister(net, &udp4_seq_afinfo);
}

static struct pernet_operations udp4_net_ops = {
	.init = udp4_proc_init_net,
	.exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
	return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
	unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &uhash_entries);
	if (ret)
		return 0;

	if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
		uhash_entries = UDP_HTABLE_SIZE_MIN;
	return 1;
}
__setup("uhash_entries=", set_uhash_entries);
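
/* Editorial note (illustrative): the table size can be forced at boot with
 * a kernel command-line parameter such as "uhash_entries=65536"; non-zero
 * values below UDP_HTABLE_SIZE_MIN are raised to that minimum by
 * set_uhash_entries() above.
 */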

void __init udp_table_init(struct udp_table *table, const char *name)
{
	unsigned int i;

	table->hash = alloc_large_system_hash(name,
					      2 * sizeof(struct udp_hslot),
					      uhash_entries,
					      21, /* one slot per 2 MB */
					      0,
					      &table->log,
					      &table->mask,
					      UDP_HTABLE_SIZE_MIN,
					      64 * 1024);

	table->hash2 = table->hash + (table->mask + 1);
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash[i].head);
		table->hash[i].count = 0;
		spin_lock_init(&table->hash[i].lock);
	}
	for (i = 0; i <= table->mask; i++) {
		INIT_HLIST_HEAD(&table->hash2[i].head);
		table->hash2[i].count = 0;
		spin_lock_init(&table->hash2[i].lock);
	}
}

u32 udp_flow_hashrnd(void)
{
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	return hashrnd;
}
EXPORT_SYMBOL(udp_flow_hashrnd);

static void __udp_sysctl_init(struct net *net)
{
	net->ipv4.sysctl_udp_rmem_min = SK_MEM_QUANTUM;
	net->ipv4.sysctl_udp_wmem_min = SK_MEM_QUANTUM;

#ifdef CONFIG_NET_L3_MASTER_DEV
	net->ipv4.sysctl_udp_l3mdev_accept = 0;
#endif
}

static int __net_init udp_sysctl_init(struct net *net)
{
	__udp_sysctl_init(net);
	return 0;
}

static struct pernet_operations __net_initdata udp_sysctl_ops = {
	.init	= udp_sysctl_init,
};

void __init udp_init(void)
{
	unsigned long limit;
	unsigned int i;

	udp_table_init(&udp_table, "UDP");
	limit = nr_free_buffer_pages() / 8;
	limit = max(limit, 128UL);
	sysctl_udp_mem[0] = limit / 4 * 3;
	sysctl_udp_mem[1] = limit;
	sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

	__udp_sysctl_init(&init_net);

	/* 16 spinlocks per cpu */
	udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
	udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
				GFP_KERNEL);
	if (!udp_busylocks)
		panic("UDP: failed to alloc udp_busylocks\n");
	for (i = 0; i < (1U << udp_busylocks_log); i++)
		spin_lock_init(udp_busylocks + i);

	if (register_pernet_subsys(&udp_sysctl_ops))
		panic("UDP: failed to init sysctl parameters.\n");
}