/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	: 	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	: 	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *              Steve Whitehouse:       Added default destructor to free
 *                                      protocol private data.
 *              Steve Whitehouse:       Added various other default routines
 *                                      common to several socket families.
 *              Chris Evans     :       Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/unaligned.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <linux/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>

#include <trace/events/sock.h>

#include <net/tcp.h>
#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

static void sock_inuse_add(struct net *net, int val);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when the
 * socket was created and whether the current process has it in the user
 * namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);
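
/* Illustrative usage (not taken from this file): a protocol that wants to
 * gate a privileged option on the socket opener's privileges as well as the
 * current task's could write something like
 *
 *	if (!sk_ns_capable(sk, sock_net(sk)->user_ns, CAP_NET_ADMIN))
 *		return -EPERM;
 *
 * which is essentially what the sk_net_capable() wrapper below does.
 */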

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when the
 * socket was created and whether the current process has it in all user
 * namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when the
 * socket was created and whether the current process has it over the network
 * namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family and separate keys for internal and
 * userspace sockets.
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static struct lock_class_key af_family_kern_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */

#define _sock_locks(x)						  \
  x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
  x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
  x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
  x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
  x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
  x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
  x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
  x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
  x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
  x "27"       ,	x "28"          ,	x "AF_CAN"      , \
  x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "IUCV"        , \
  x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
  x "AF_IEEE802154",	x "AF_CAIF"	,	x "AF_ALG"      , \
  x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
  x "AF_QIPCRTR",	x "AF_SMC"	,	x "AF_XDP"	, \
  x "AF_MAX"

static const char *const af_family_key_strings[AF_MAX+1] = {
	_sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	_sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	_sock_locks("clock-")
};

static const char *const af_family_kern_key_strings[AF_MAX+1] = {
	_sock_locks("k-sk_lock-")
};
static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
	_sock_locks("k-slock-")
};
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
	_sock_locks("k-clock-")
};
static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
	_sock_locks("rlock-")
};
static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
	_sock_locks("wlock-")
};
static const char *const af_family_elock_key_strings[AF_MAX+1] = {
	_sock_locks("elock-")
};

/*
 * sk_callback_lock and sk queues locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];
static struct lock_class_key af_rlock_keys[AF_MAX];
static struct lock_class_key af_wlock_keys[AF_MAX];
static struct lock_class_key af_elock_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

int sysctl_tstamp_allow_data __read_mostly = 1;

DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
EXPORT_SYMBOL_GPL(memalloc_socks_key);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_branch_inc(&memalloc_socks_key);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);
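
/*
 * Illustrative only: a memory-reserve user such as a swap-over-network
 * transport would typically call sk_set_memalloc() on its transport socket
 * right after creating it, and sk_clear_memalloc() below once that use goes
 * away, so the emergency reserves are only tapped while they are needed.
 */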

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_branch_dec(&memalloc_socks_key);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while
	 * it has rmem allocations due to the last swapfile being deactivated
	 * but there is a risk that the socket is unusable due to exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned int noreclaim_flag;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	noreclaim_flag = memalloc_noreclaim_save();
	ret = sk->sk_backlog_rcv(sk, skb);
	memalloc_noreclaim_restore(noreclaim_flag);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);
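
/*
 * Note: the memalloc_noreclaim_save()/restore() pair above runs the backlog
 * receive of a SOCK_MEMALLOC socket with PF_MEMALLOC set, so allocations it
 * triggers may dip into the memory reserves instead of recursing into direct
 * reclaim while we are processing memory-reserve traffic.
 */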

static int sock_get_timeout(long timeo, void *optval)
{
	struct __kernel_old_timeval tv;

	if (timeo == MAX_SCHEDULE_TIMEOUT) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
	} else {
		tv.tv_sec = timeo / HZ;
		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
	}

	if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
		*(struct old_timeval32 *)optval = tv32;
		return sizeof(tv32);
	}

	*(struct __kernel_old_timeval *)optval = tv;
	return sizeof(tv);
}
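
/*
 * Example (not from the original source): with HZ == 1000, a timeout of
 * 2500 jiffies is reported to userspace as tv_sec = 2, tv_usec = 500000.
 */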

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct __kernel_old_timeval tv;

	if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32;

		if (optlen < sizeof(tv32))
			return -EINVAL;

		if (copy_from_user(&tv32, optval, sizeof(tv32)))
			return -EFAULT;
		tv.tv_sec = tv32.tv_sec;
		tv.tv_usec = tv32.tv_usec;
	} else {
		if (optlen < sizeof(tv))
			return -EINVAL;
		if (copy_from_user(&tv, optval, sizeof(tv)))
			return -EFAULT;
	}
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP(tv.tv_usec, USEC_PER_SEC / HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm,  current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* We escape from the RCU-protected region, so make sure we don't leak
	 * a non-refcounted dst.
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sk_filter(sk, skb);
	if (err)
		return err;

	return __sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap, bool refcounted)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter_trim_cap(sk, skb, trim_cap))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	if (refcounted)
		sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		sk->sk_dst_pending_confirm = 0;
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_setbindtodevice_locked(struct sock *sk, int ifindex)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (ifindex < 0)
		goto out;

	sk->sk_bound_dev_if = ifindex;
	if (sk->sk_prot->rehash)
		sk->sk_prot->rehash(sk);
	sk_dst_reset(sk);

	ret = 0;

out:
#endif

	return ret;
}

static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	ret = sock_setbindtodevice_locked(sk, index);
	release_sock(sk);

out:
#endif

	return ret;
}

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock_txtime sk_txtime;
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		sk_dst_reset(sk);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
L
Linus Torvalds 已提交
811
			break;
812 813
		}
		goto set_rcvbuf;
L
Linus Torvalds 已提交
814

815
	case SO_KEEPALIVE:
816 817
		if (sk->sk_prot->keepalive)
			sk->sk_prot->keepalive(sk, valbool);
818 819 820 821 822 823 824 825
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
826
		sk->sk_no_check_tx = valbool;
827 828 829
		break;

	case SO_PRIORITY:
830 831
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
832 833 834 835 836 837 838 839
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
L
Linus Torvalds 已提交
840
			break;
841
		}
E
Eric Dumazet 已提交
842
		if (copy_from_user(&ling, optval, sizeof(ling))) {
843
			ret = -EFAULT;
L
Linus Torvalds 已提交
844
			break;
845 846 847 848
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
L
Linus Torvalds 已提交
849
#if (BITS_PER_LONG == 32)
850 851
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
L
Linus Torvalds 已提交
852
			else
853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

870 871
	case SO_TIMESTAMP_OLD:
	case SO_TIMESTAMPNS_OLD:
872
		if (valbool)  {
873
			if (optname == SO_TIMESTAMP_OLD)
874 875 876
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
877
			sock_set_flag(sk, SOCK_RCVTSTAMP);
878
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
879
		} else {
880
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
881 882
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
883 884
		break;

885
	case SO_TIMESTAMPING_OLD:
886
		if (val & ~SOF_TIMESTAMPING_MASK) {
887
			ret = -EINVAL;
888 889
			break;
		}
890

891
		if (val & SOF_TIMESTAMPING_OPT_ID &&
892
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
893 894
			if (sk->sk_protocol == IPPROTO_TCP &&
			    sk->sk_type == SOCK_STREAM) {
895 896
				if ((1 << sk->sk_state) &
				    (TCPF_CLOSE | TCPF_LISTEN)) {
897 898 899 900 901 902 903 904
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}
905 906 907 908 909 910 911

		if (val & SOF_TIMESTAMPING_OPT_STATS &&
		    !(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
			ret = -EINVAL;
			break;
		}

912
		sk->sk_tsflags = val;
913 914 915 916 917
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
E
Eric Dumazet 已提交
918
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
919 920
		break;

921 922 923
	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
924 925 926 927
		if (sock->ops->set_rcvlowat)
			ret = sock->ops->set_rcvlowat(sk, val);
		else
			sk->sk_rcvlowat = val ? : 1;
928 929 930 931 932 933 934 935 936
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;
L
Linus Torvalds 已提交
937

938 939 940 941
	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;
L
Linus Torvalds 已提交
942

943 944
			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
L
Linus Torvalds 已提交
945
				break;
946 947 948 949 950

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

951 952 953 954 955 956 957 958 959 960 961 962 963
	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989
	case SO_ATTACH_REUSEPORT_CBPF:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_reuseport_attach_filter(&fprog, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_EBPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_user(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_reuseport_attach_bpf(ufd, sk);
		}
		break;

990
	case SO_DETACH_FILTER:
991
		ret = sk_detach_filter(sk);
992
		break;
L
Linus Torvalds 已提交
993

994 995 996 997 998 999 1000
	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

1001 1002 1003 1004 1005 1006
	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
1007
	case SO_MARK:
1008
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
1009
			ret = -EPERM;
1010
		} else if (val != sk->sk_mark) {
1011
			sk->sk_mark = val;
1012 1013
			sk_dst_reset(sk);
		}
1014
		break;
C
Catherine Zhang 已提交
1015

1016
	case SO_RXQ_OVFL:
1017
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
1018
		break;
1019 1020 1021 1022 1023

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

1024 1025
	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
1026
			ret = sock->ops->set_peek_off(sk, val);
1027 1028 1029
		else
			ret = -EOPNOTSUPP;
		break;
1030 1031 1032 1033 1034

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

1035 1036 1037 1038
	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

1039
#ifdef CONFIG_NET_RX_BUSY_POLL
1040
	case SO_BUSY_POLL:
1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif
E
Eric Dumazet 已提交
1052 1053

	case SO_MAX_PACING_RATE:
1054 1055 1056 1057
		if (val != ~0U)
			cmpxchg(&sk->sk_pacing_status,
				SK_PACING_NONE,
				SK_PACING_NEEDED);
1058
		sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
E
Eric Dumazet 已提交
1059 1060 1061 1062
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

1063 1064 1065 1066
	case SO_INCOMING_CPU:
		sk->sk_incoming_cpu = val;
		break;

1067 1068 1069 1070
	case SO_CNX_ADVICE:
		if (val == 1)
			dst_negative_advice(sk);
		break;
1071 1072

	case SO_ZEROCOPY:
1073
		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
W
Willem de Bruijn 已提交
1074 1075 1076 1077
			if (!((sk->sk_type == SOCK_STREAM &&
			       sk->sk_protocol == IPPROTO_TCP) ||
			      (sk->sk_type == SOCK_DGRAM &&
			       sk->sk_protocol == IPPROTO_UDP)))
1078 1079
				ret = -ENOTSUPP;
		} else if (sk->sk_family != PF_RDS) {
1080
			ret = -ENOTSUPP;
1081 1082 1083 1084 1085 1086 1087
		}
		if (!ret) {
			if (val < 0 || val > 1)
				ret = -EINVAL;
			else
				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
		}
1088 1089
		break;

1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104
	case SO_TXTIME:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
		} else if (optlen != sizeof(struct sock_txtime)) {
			ret = -EINVAL;
		} else if (copy_from_user(&sk_txtime, optval,
			   sizeof(struct sock_txtime))) {
			ret = -EFAULT;
		} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
			ret = -EINVAL;
		} else {
			sock_valbool_flag(sk, SOCK_TXTIME, true);
			sk->sk_clockid = sk_txtime.clockid;
			sk->sk_txtime_deadline_mode =
				!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
1105 1106
			sk->sk_txtime_report_errors =
				!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
1107 1108 1109
		}
		break;

1110 1111 1112 1113
	case SO_BINDTOIFINDEX:
		ret = sock_setbindtodevice_locked(sk, val);
		break;

1114 1115 1116
	default:
		ret = -ENOPROTOOPT;
		break;
1117
	}
L
Linus Torvalds 已提交
1118 1119 1120
	release_sock(sk);
	return ret;
}
E
Eric Dumazet 已提交
1121
EXPORT_SYMBOL(sock_setsockopt);
L
Linus Torvalds 已提交
1122 1123


S
stephen hemminger 已提交
1124 1125
static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
1126 1127 1128 1129 1130 1131
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

1132 1133
		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
1134 1135 1136
	}
}

1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148
static int groups_to_user(gid_t __user *dst, const struct group_info *src)
{
	struct user_namespace *user_ns = current_user_ns();
	int i;

	for (i = 0; i < src->ngroups; i++)
		if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
			return -EFAULT;

	return 0;
}

L
Linus Torvalds 已提交
1149 1150 1151 1152
int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
1153

1154
	union {
1155
		int val;
1156
		u64 val64;
1157
		struct linger ling;
1158 1159
		struct old_timeval32 tm32;
		struct __kernel_old_timeval tm;
1160
		struct sock_txtime txtime;
L
Linus Torvalds 已提交
1161
	} v;
1162

1163
	int lv = sizeof(int);
L
Linus Torvalds 已提交
1164
	int len;
1165

1166
	if (get_user(len, optlen))
1167
		return -EFAULT;
1168
	if (len < 0)
L
Linus Torvalds 已提交
1169
		return -EINVAL;
1170

1171
	memset(&v, 0, sizeof(v));
1172

E
Eric Dumazet 已提交
1173
	switch (optname) {
1174 1175 1176 1177 1178 1179 1180 1181 1182
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
E
Eric Dumazet 已提交
1183
		v.val = sock_flag(sk, SOCK_BROADCAST);
1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

T
Tom Herbert 已提交
1198 1199 1200 1201
	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

1202
	case SO_KEEPALIVE:
E
Eric Dumazet 已提交
1203
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
1204 1205 1206 1207 1208 1209
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

1210 1211 1212 1213
	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

1214 1215 1216 1217
	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

1218 1219
	case SO_ERROR:
		v.val = -sock_error(sk);
E
Eric Dumazet 已提交
1220
		if (v.val == 0)
1221 1222 1223 1224
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
E
Eric Dumazet 已提交
1225
		v.val = sock_flag(sk, SOCK_URGINLINE);
1226 1227 1228
		break;

	case SO_NO_CHECK:
1229
		v.val = sk->sk_no_check_tx;
1230 1231 1232 1233 1234 1235 1236 1237
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
E
Eric Dumazet 已提交
1238
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
1239 1240 1241 1242 1243 1244 1245
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

1246
	case SO_TIMESTAMP_OLD:
1247 1248 1249 1250
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

1251
	case SO_TIMESTAMPNS_OLD:
1252
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
1253 1254
		break;

1255
	case SO_TIMESTAMPING_OLD:
1256
		v.val = sk->sk_tsflags;
1257 1258
		break;

1259
	case SO_RCVTIMEO:
1260
		lv = sock_get_timeout(sk->sk_rcvtimeo, &v);
1261 1262 1263
		break;

	case SO_SNDTIMEO:
1264
		lv = sock_get_timeout(sk->sk_sndtimeo, &v);
1265
		break;
L
Linus Torvalds 已提交
1266

1267 1268 1269
	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;
L
Linus Torvalds 已提交
1270

1271
	case SO_SNDLOWAT:
E
Eric Dumazet 已提交
1272
		v.val = 1;
1273
		break;
L
Linus Torvalds 已提交
1274

1275
	case SO_PASSCRED:
1276
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
1277
		break;
L
Linus Torvalds 已提交
1278

1279
	case SO_PEERCRED:
1280 1281 1282 1283 1284 1285
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
1286 1287
			return -EFAULT;
		goto lenout;
1288
	}
L
Linus Torvalds 已提交
1289

1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310
	case SO_PEERGROUPS:
	{
		int ret, n;

		if (!sk->sk_peer_cred)
			return -ENODATA;

		n = sk->sk_peer_cred->group_info->ngroups;
		if (len < n * sizeof(gid_t)) {
			len = n * sizeof(gid_t);
			return put_user(len, optlen) ? -EFAULT : -ERANGE;
		}
		len = n * sizeof(gid_t);

		ret = groups_to_user((gid_t __user *)optval,
				     sk->sk_peer_cred->group_info);
		if (ret)
			return ret;
		goto lenout;
	}

1311 1312 1313 1314
	case SO_PEERNAME:
	{
		char address[128];

1315 1316
		lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
		if (lv < 0)
1317 1318 1319 1320 1321 1322 1323
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}
L
Linus Torvalds 已提交
1324

1325 1326 1327 1328 1329 1330
	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;
L
Linus Torvalds 已提交
1331

1332
	case SO_PASSSEC:
1333
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1334
		break;
C
Catherine Zhang 已提交
1335

1336 1337
	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);
L
Linus Torvalds 已提交
1338

1339 1340 1341 1342
	case SO_MARK:
		v.val = sk->sk_mark;
		break;

1343
	case SO_RXQ_OVFL:
E
Eric Dumazet 已提交
1344
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
1345 1346
		break;

1347
	case SO_WIFI_STATUS:
E
Eric Dumazet 已提交
1348
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
1349 1350
		break;

1351 1352 1353 1354 1355 1356
	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
1357
	case SO_NOFCS:
E
Eric Dumazet 已提交
1358
		v.val = sock_flag(sk, SOCK_NOFCS);
1359
		break;
1360

1361
	case SO_BINDTODEVICE:
1362 1363
		return sock_getbindtodevice(sk, optval, optlen, len);

1364 1365 1366 1367 1368 1369
	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;
1370

1371 1372 1373 1374
	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

1375 1376 1377 1378
	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

1379 1380 1381 1382
	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

1383
#ifdef CONFIG_NET_RX_BUSY_POLL
1384
	case SO_BUSY_POLL:
1385 1386 1387 1388
		v.val = sk->sk_ll_usec;
		break;
#endif

E
Eric Dumazet 已提交
1389
	case SO_MAX_PACING_RATE:
1390 1391
		/* 32bit version */
		v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
E
Eric Dumazet 已提交
1392 1393
		break;

E
Eric Dumazet 已提交
1394 1395 1396 1397
	case SO_INCOMING_CPU:
		v.val = sk->sk_incoming_cpu;
		break;

1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412
	case SO_MEMINFO:
	{
		u32 meminfo[SK_MEMINFO_VARS];

		if (get_user(len, optlen))
			return -EFAULT;

		sk_get_meminfo(sk, meminfo);

		len = min_t(unsigned int, len, sizeof(meminfo));
		if (copy_to_user(optval, &meminfo, len))
			return -EFAULT;

		goto lenout;
	}
1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_INCOMING_NAPI_ID:
		v.val = READ_ONCE(sk->sk_napi_id);

		/* aggregate non-NAPI IDs down to 0 */
		if (v.val < MIN_NAPI_ID)
			v.val = 0;

		break;
#endif

1425 1426 1427 1428 1429 1430 1431
	case SO_COOKIE:
		lv = sizeof(u64);
		if (len < lv)
			return -EINVAL;
		v.val64 = sock_gen_cookie(sk);
		break;

1432 1433 1434 1435
	case SO_ZEROCOPY:
		v.val = sock_flag(sk, SOCK_ZEROCOPY);
		break;

1436 1437 1438 1439 1440
	case SO_TXTIME:
		lv = sizeof(v.txtime);
		v.txtime.clockid = sk->sk_clockid;
		v.txtime.flags |= sk->sk_txtime_deadline_mode ?
				  SOF_TXTIME_DEADLINE_MODE : 0;
1441 1442
		v.txtime.flags |= sk->sk_txtime_report_errors ?
				  SOF_TXTIME_REPORT_ERRORS : 0;
1443 1444
		break;

1445 1446 1447 1448
	case SO_BINDTOIFINDEX:
		v.val = sk->sk_bound_dev_if;
		break;

1449
	default:
1450 1451 1452
		/* We implement the SO_SNDLOWAT etc to not be settable
		 * (1003.1g 7).
		 */
1453
		return -ENOPROTOOPT;
L
Linus Torvalds 已提交
1454
	}
1455

L
Linus Torvalds 已提交
1456 1457 1458 1459 1460
	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
1461 1462 1463
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
L
Linus Torvalds 已提交
1464 1465
}

I
Ingo Molnar 已提交
1466 1467 1468 1469 1470
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
D
Dave Jones 已提交
1471
static inline void sock_lock_init(struct sock *sk)
I
Ingo Molnar 已提交
1472
{
1473 1474 1475 1476 1477 1478 1479 1480 1481 1482
	if (sk->sk_kern_sock)
		sock_lock_init_class_and_name(
			sk,
			af_family_kern_slock_key_strings[sk->sk_family],
			af_family_kern_slock_keys + sk->sk_family,
			af_family_kern_key_strings[sk->sk_family],
			af_family_kern_keys + sk->sk_family);
	else
		sock_lock_init_class_and_name(
			sk,
1483 1484 1485 1486
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
I
Ingo Molnar 已提交
1487 1488
}

E
Eric Dumazet 已提交
1489 1490 1491
/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
1492
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
E
Eric Dumazet 已提交
1493
 */
1494 1495 1496 1497 1498
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
1499 1500 1501 1502 1503
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

1504 1505 1506 1507 1508 1509
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

1510 1511
static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
1512 1513 1514 1515 1516
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
1517 1518 1519 1520
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
E
Eric Dumazet 已提交
1521 1522
		if (priority & __GFP_ZERO)
			sk_prot_clear_nulls(sk, prot->obj_size);
1523
	} else
1524 1525
		sk = kmalloc(prot->obj_size, priority);

1526 1527 1528 1529 1530 1531
	if (sk != NULL) {
		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
K
Krishna Kumar 已提交
1532
		sk_tx_queue_clear(sk);
1533 1534
	}

1535
	return sk;
1536 1537 1538 1539 1540 1541 1542 1543 1544

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
1545 1546 1547 1548 1549
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
1550
	struct module *owner;
1551

1552
	owner = prot->owner;
1553
	slab = prot->slab;
1554

T
Tejun Heo 已提交
1555
	cgroup_sk_free(&sk->sk_cgrp_data);
1556
	mem_cgroup_sk_free(sk);
1557
	security_sk_free(sk);
1558 1559 1560 1561
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
1562
	module_put(owner);
1563 1564
}

L
Linus Torvalds 已提交
1565 1566
/**
 *	sk_alloc - All socket objects are allocated here
1567
 *	@net: the applicable net namespace
1568 1569 1570
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
1571
 *	@kern: is this to be a kernel socket?
L
Linus Torvalds 已提交
1572
 */
1573
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
1574
		      struct proto *prot, int kern)
L
Linus Torvalds 已提交
1575
{
1576
	struct sock *sk;
L
Linus Torvalds 已提交
1577

1578
	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
L
Linus Torvalds 已提交
1579
	if (sk) {
1580 1581 1582 1583 1584 1585
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
1586
		sk->sk_kern_sock = kern;
1587
		sock_lock_init(sk);
1588
		sk->sk_net_refcnt = kern ? 0 : 1;
1589
		if (likely(sk->sk_net_refcnt)) {
1590
			get_net(net);
1591 1592 1593
			sock_inuse_add(net, 1);
		}

1594
		sock_net_set(sk, net);
1595
		refcount_set(&sk->sk_wmem_alloc, 1);
1596

1597
		mem_cgroup_sk_alloc(sk);
1598
		cgroup_sk_alloc(&sk->sk_cgrp_data);
1599 1600
		sock_update_classid(&sk->sk_cgrp_data);
		sock_update_netprioidx(&sk->sk_cgrp_data);
L
Linus Torvalds 已提交
1601
	}
1602

1603
	return sk;
L
Linus Torvalds 已提交
1604
}
E
Eric Dumazet 已提交
1605
EXPORT_SYMBOL(sk_alloc);
L
Linus Torvalds 已提交
1606

1607 1608 1609 1610
/* Sockets having SOCK_RCU_FREE will call this function after one RCU
 * grace period. This is the case for UDP sockets and TCP listeners.
 */
static void __sk_destruct(struct rcu_head *head)
L
Linus Torvalds 已提交
1611
{
1612
	struct sock *sk = container_of(head, struct sock, sk_rcu);
L
Linus Torvalds 已提交
1613 1614 1615 1616 1617
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

1618
	filter = rcu_dereference_check(sk->sk_filter,
1619
				       refcount_read(&sk->sk_wmem_alloc) == 0);
L
Linus Torvalds 已提交
1620
	if (filter) {
1621
		sk_filter_uncharge(sk, filter);
1622
		RCU_INIT_POINTER(sk->sk_filter, NULL);
L
Linus Torvalds 已提交
1623
	}
1624 1625
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_detach_sock(sk);
L
Linus Torvalds 已提交
1626

E
Eric Dumazet 已提交
1627
	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
L
Linus Torvalds 已提交
1628 1629

	if (atomic_read(&sk->sk_omem_alloc))
J
Joe Perches 已提交
1630 1631
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));
L
Linus Torvalds 已提交
1632

1633 1634 1635 1636 1637
	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

1638 1639 1640
	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
1641 1642
	if (likely(sk->sk_net_refcnt))
		put_net(sock_net(sk));
1643
	sk_prot_free(sk->sk_prot_creator, sk);
L
Linus Torvalds 已提交
1644
}
1645

1646 1647 1648 1649 1650 1651 1652 1653
void sk_destruct(struct sock *sk)
{
	if (sock_flag(sk, SOCK_RCU_FREE))
		call_rcu(&sk->sk_rcu, __sk_destruct);
	else
		__sk_destruct(&sk->sk_rcu);
}

1654 1655
static void __sk_free(struct sock *sk)
{
1656 1657 1658
	if (likely(sk->sk_net_refcnt))
		sock_inuse_add(sock_net(sk), -1);

1659
	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
1660 1661 1662 1663 1664
		sock_diag_broadcast_destroy(sk);
	else
		sk_destruct(sk);
}

1665 1666 1667
void sk_free(struct sock *sk)
{
	/*
L
Lucas De Marchi 已提交
1668
	 * We subtract one from sk_wmem_alloc and can know if
1669 1670 1671
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
1672
	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
1673 1674
		__sk_free(sk);
}
E
Eric Dumazet 已提交
1675
EXPORT_SYMBOL(sk_free);
L
Linus Torvalds 已提交
1676

1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697
static void sk_init_common(struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);

	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
			af_rlock_keys + sk->sk_family,
			af_family_rlock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_write_queue.lock,
			af_wlock_keys + sk->sk_family,
			af_family_wlock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_error_queue.lock,
			af_elock_keys + sk->sk_family,
			af_family_elock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);
}

1698 1699 1700 1701 1702 1703 1704 1705
/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1706
{
1707
	struct sock *newsk;
1708
	bool is_charged = true;
1709

1710
	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
1711 1712 1713
	if (newsk != NULL) {
		struct sk_filter *filter;

1714
		sock_copy(newsk, sk);
1715

1716 1717
		newsk->sk_prot_creator = sk->sk_prot;

1718
		/* SANITY */
1719 1720
		if (likely(newsk->sk_net_refcnt))
			get_net(sock_net(newsk));
1721 1722 1723
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
1724
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
Z
Zhu Yi 已提交
1725
		newsk->sk_backlog.len = 0;
1726 1727

		atomic_set(&newsk->sk_rmem_alloc, 0);
1728 1729 1730
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
1731
		refcount_set(&newsk->sk_wmem_alloc, 1);
1732
		atomic_set(&newsk->sk_omem_alloc, 0);
1733
		sk_init_common(newsk);
1734 1735

		newsk->sk_dst_cache	= NULL;
1736
		newsk->sk_dst_pending_confirm = 0;
1737 1738
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
1739
		atomic_set(&newsk->sk_drops, 0);
1740 1741
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
W
Willem de Bruijn 已提交
1742
		atomic_set(&newsk->sk_zckey, 0);
1743 1744

		sock_reset_flag(newsk, SOCK_DONE);
1745
		mem_cgroup_sk_alloc(newsk);
1746
		cgroup_sk_alloc(&newsk->sk_cgrp_data);
1747

1748 1749
		rcu_read_lock();
		filter = rcu_dereference(sk->sk_filter);
1750
		if (filter != NULL)
1751 1752 1753 1754 1755
			/* though it's an empty new sock, the charging may fail
			 * if sysctl_optmem_max was changed between creation of
			 * original socket and cloning
			 */
			is_charged = sk_filter_charge(newsk, filter);
1756 1757
		RCU_INIT_POINTER(newsk->sk_filter, filter);
		rcu_read_unlock();
1758

1759
		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
1760 1761 1762 1763 1764 1765
			/* We need to make sure that we don't uncharge the new
			 * socket if we couldn't charge it in the first place
			 * as otherwise we uncharge the parent's filter.
			 */
			if (!is_charged)
				RCU_INIT_POINTER(newsk->sk_filter, NULL);
1766
			sk_free_unlock_clone(newsk);
1767 1768 1769
			newsk = NULL;
			goto out;
		}
1770
		RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);
1771 1772

		newsk->sk_err	   = 0;
1773
		newsk->sk_err_soft = 0;
1774
		newsk->sk_priority = 0;
E
Eric Dumazet 已提交
1775
		newsk->sk_incoming_cpu = raw_smp_processor_id();
1776 1777
		if (likely(newsk->sk_net_refcnt))
			sock_inuse_add(sock_net(newsk), 1);
1778

E
Eric Dumazet 已提交
1779 1780 1781 1782 1783
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
1784
		refcount_set(&newsk->sk_refcnt, 2);
1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this have
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
1798
		sk_set_socket(newsk, NULL);
1799
		newsk->sk_wq = NULL;
1800 1801

		if (newsk->sk_prot->sockets_allocated)
1802
			sk_sockets_allocated_inc(newsk);
1803

1804 1805
		if (sock_needs_netstamp(sk) &&
		    newsk->sk_flags & SK_FLAGS_TIMESTAMP)
1806
			net_enable_timestamp();
1807 1808 1809 1810
	}
out:
	return newsk;
}
1811
EXPORT_SYMBOL_GPL(sk_clone_lock);
1812

1813 1814 1815 1816 1817 1818 1819 1820 1821 1822
void sk_free_unlock_clone(struct sock *sk)
{
	/* It is still raw copy of parent, so invalidate
	 * destructor and make plain sk_free() */
	sk->sk_destruct = NULL;
	bh_unlock_sock(sk);
	sk_free(sk);
}
EXPORT_SYMBOL_GPL(sk_free_unlock_clone);

1823 1824
void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
1825 1826
	u32 max_segs = 1;

E
Eric Dumazet 已提交
1827
	sk_dst_set(sk, dst);
1828
	sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
1829
	if (sk->sk_route_caps & NETIF_F_GSO)
1830
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
E
Eric Dumazet 已提交
1831
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
1832
	if (sk_can_gso(sk)) {
1833
		if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
1834
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
1835
		} else {
1836
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
1837
			sk->sk_gso_max_size = dst->dev->gso_max_size;
1838
			max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
1839
		}
1840
	}
1841
	sk->sk_gso_max_segs = max_segs;
1842 1843 1844
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

L
Linus Torvalds 已提交
1845 1846 1847 1848 1849
/*
 *	Simple resource managers for sockets.
 */


1850 1851
/*
 * Write buffer destructor automatically called from kfree_skb.
L
Linus Torvalds 已提交
1852 1853 1854 1855
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);
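
/* Illustrative sketch (not part of this file's API): a typical transmit path
 * pairs skb_set_owner_w() with this destructor. The helper names below are
 * hypothetical.
 *
 *	skb = alloc_skb(len, GFP_KERNEL);
 *	skb_set_owner_w(skb, sk);	// charge skb->truesize to sk_wmem_alloc
 *	my_xmit(skb);			// e.g. dev_queue_xmit()
 *	// when the driver frees the skb, sock_wfree() uncharges the socket
 *	// and calls sk->sk_write_space() so blocked senders can make progress
 */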

/* This variant of sock_wfree() is used by TCP,
 * since it sets SOCK_USE_WRITE_QUEUE.
 */
void __sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
		__sk_free(sk);
}

void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
#ifdef CONFIG_INET
	if (unlikely(!sk_fullsock(sk))) {
		skb->destructor = sock_edemux;
		sock_hold(sk);
		return;
	}
#endif
	skb->destructor = sock_wfree;
	skb_set_hash_from_sk(skb, sk);
	/*
	 * We used to take a refcount on sk, but the following operation
	 * is enough to guarantee sk_free() won't free this sock until
	 * all in-flight packets are completed.
	 */
	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
}
EXPORT_SYMBOL(skb_set_owner_w);

/* This helper is used by netem, as it can hold packets in its
 * delay queue. We want to allow the owner socket to send more
 * packets, as if they were already TX completed by a typical driver.
 * But we also want to keep skb->sk set because some packet schedulers
 * rely on it (sch_fq for example).
 */
void skb_orphan_partial(struct sk_buff *skb)
{
	if (skb_is_tcp_pure_ack(skb))
		return;

	if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
	    || skb->destructor == tcp_wfree
#endif
		) {
		struct sock *sk = skb->sk;

		if (refcount_inc_not_zero(&sk->sk_refcnt)) {
			WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
			skb->destructor = sock_efree;
		}
	} else {
		skb_orphan(skb);
	}
}
EXPORT_SYMBOL(skb_orphan_partial);
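
/* Illustrative note: a queueing discipline that parks packets for a long time
 * (netem is the in-tree example) may call skb_orphan_partial() at enqueue so
 * the owner socket gets its wmem budget back while skb->sk stays valid for
 * schedulers such as sch_fq. Sketch, with a hypothetical enqueue function:
 *
 *	static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		skb_orphan_partial(skb);  // uncharge sk_wmem_alloc, keep skb->sk
 *		...
 *	}
 */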

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

/*
 * Buffer destructor for skbs that are not used directly in read or write
 * path, e.g. for error handler skbs. Automatically called from kfree_skb.
 */
void sock_efree(struct sk_buff *skb)
{
	sock_put(skb->sk);
}
EXPORT_SYMBOL(sock_efree);

kuid_t sock_i_uid(struct sock *sk)
{
	kuid_t uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

static void sock_ofree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_omem_alloc);
}

struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
			     gfp_t priority)
{
	struct sk_buff *skb;

	/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
	if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
	    sysctl_optmem_max)
		return NULL;

	skb = alloc_skb(size, priority);
	if (!skb)
		return NULL;

	atomic_add(skb->truesize, &sk->sk_omem_alloc);
	skb->sk = sk;
	skb->destructor = sock_ofree;
	return skb;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned int)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/* Free an option memory block. Note, we actually want the inline
 * here as this allows gcc to detect the nullify and fold away the
 * condition entirely.
 */
static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
				  const bool nullify)
{
	if (WARN_ON_ONCE(!mem))
		return;
	if (nullify)
		kzfree(mem);
	else
		kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}

void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	__sock_kfree_s(sk, mem, size, false);
}
EXPORT_SYMBOL(sock_kfree_s);

void sock_kzfree_s(struct sock *sk, void *mem, int size)
{
	__sock_kfree_s(sk, mem, size, true);
}
EXPORT_SYMBOL(sock_kzfree_s);
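
/* Illustrative sketch: option memory is charged against sk_omem_alloc, so an
 * allocation made with sock_kmalloc() must be released with sock_kfree_s()
 * (or sock_kzfree_s() for sensitive material), passing the same size:
 *
 *	opt = sock_kmalloc(sk, optlen, GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, optlen);
 */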

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 * I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order)
{
	struct sk_buff *skb;
	long timeo;
	int err;

	timeo = sock_sndtimeo(sk, noblock);
	for (;;) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
			break;

		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}
	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
				   errcode, sk->sk_allocation);
	if (skb)
		skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
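
/* Illustrative sketch of a datagram sendmsg() path using this helper; the
 * "hlen" headroom value below is hypothetical:
 *
 *	skb = sock_alloc_send_skb(sk, hlen + len,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;
 *	skb_reserve(skb, hlen);
 *	err = memcpy_from_msg(skb_put(skb, len), msg, len);
 *
 * The helper blocks (subject to sk_sndtimeo) until sk_wmem_alloc drops below
 * sk_sndbuf, then charges the new skb via skb_set_owner_w().
 */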

int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
		     struct sockcm_cookie *sockc)
{
	u32 tsflags;

	switch (cmsg->cmsg_type) {
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
			return -EINVAL;
		sockc->mark = *(u32 *)CMSG_DATA(cmsg);
		break;
	case SO_TIMESTAMPING_OLD:
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
			return -EINVAL;

		tsflags = *(u32 *)CMSG_DATA(cmsg);
		if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
			return -EINVAL;

		sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
		sockc->tsflags |= tsflags;
		break;
	case SCM_TXTIME:
		if (!sock_flag(sk, SOCK_TXTIME))
			return -EINVAL;
		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
			return -EINVAL;
		sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
		break;
	/* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
	case SCM_RIGHTS:
	case SCM_CREDENTIALS:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(__sock_cmsg_send);

int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
		   struct sockcm_cookie *sockc)
{
	struct cmsghdr *cmsg;
	int ret;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_SOCKET)
			continue;
		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(sock_cmsg_send);
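
/* Illustrative sketch (userspace side, not from this file): a sender can
 * attach SCM_TXTIME or SO_MARK as SOL_SOCKET control messages, which the
 * handlers above turn into a sockcm_cookie for the protocol's sendmsg():
 *
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type  = SCM_TXTIME;	// requires SOCK_TXTIME on the socket
 *	cm->cmsg_len   = CMSG_LEN(sizeof(__u64));
 *	memcpy(CMSG_DATA(cm), &txtime_ns, sizeof(__u64));
 *
 * SO_MARK additionally requires CAP_NET_ADMIN in the socket's user namespace.
 */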

static void sk_enter_memory_pressure(struct sock *sk)
{
	if (!sk->sk_prot->enter_memory_pressure)
		return;

	sk->sk_prot->enter_memory_pressure(sk);
}

static void sk_leave_memory_pressure(struct sock *sk)
{
	if (sk->sk_prot->leave_memory_pressure) {
		sk->sk_prot->leave_memory_pressure(sk);
	} else {
		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;

		if (memory_pressure && *memory_pressure)
			*memory_pressure = 0;
	}
}

/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER	get_order(32768)

/**
 * skb_page_frag_refill - check that a page_frag contains enough room
 * @sz: minimum size of the fragment we want to get
 * @pfrag: pointer to page_frag
 * @gfp: priority for memory allocation
 *
 * Note: While this allocator tries to use high order pages, there is
 * no guarantee that allocations succeed. Therefore, @sz MUST be
 * less or equal than PAGE_SIZE.
 */
bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
{
	if (pfrag->page) {
		if (page_ref_count(pfrag->page) == 1) {
			pfrag->offset = 0;
			return true;
		}
		if (pfrag->offset + sz <= pfrag->size)
			return true;
		put_page(pfrag->page);
	}

	pfrag->offset = 0;
	if (SKB_FRAG_PAGE_ORDER) {
		/* Avoid direct reclaim but allow kswapd to wake */
		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
					  __GFP_COMP | __GFP_NOWARN |
					  __GFP_NORETRY,
					  SKB_FRAG_PAGE_ORDER);
		if (likely(pfrag->page)) {
			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
			return true;
		}
	}
	pfrag->page = alloc_page(gfp);
	if (likely(pfrag->page)) {
		pfrag->size = PAGE_SIZE;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(skb_page_frag_refill);

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
		return true;

	sk_enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}
EXPORT_SYMBOL(sk_page_frag_refill);
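
/* Illustrative sketch: a sendmsg() implementation that copies user data into
 * the per-socket (or per-task) page fragment typically loops like this:
 *
 *	struct page_frag *pfrag = sk_page_frag(sk);
 *
 *	if (!sk_page_frag_refill(sk, pfrag))
 *		goto wait_for_memory;
 *	copy = min_t(int, len, pfrag->size - pfrag->offset);
 *	// copy "copy" bytes into pfrag->page at pfrag->offset,
 *	// attach the page to the skb, then advance pfrag->offset += copy;
 */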

L
Linus Torvalds 已提交
2296
static void __lock_sock(struct sock *sk)
2297 2298
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
L
Linus Torvalds 已提交
2299 2300 2301
{
	DEFINE_WAIT(wait);

2302
	for (;;) {
L
Linus Torvalds 已提交
2303 2304 2305 2306 2307
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
2308
		if (!sock_owned_by_user(sk))
L
Linus Torvalds 已提交
2309 2310 2311 2312 2313
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

2314
void __release_sock(struct sock *sk)
2315 2316
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
L
Linus Torvalds 已提交
2317
{
2318
	struct sk_buff *skb, *next;
L
Linus Torvalds 已提交
2319

2320
	while ((skb = sk->sk_backlog.head) != NULL) {
L
Linus Torvalds 已提交
2321 2322
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;

2323
		spin_unlock_bh(&sk->sk_lock.slock);
L
Linus Torvalds 已提交
2324

2325 2326
		do {
			next = skb->next;
2327
			prefetch(next);
E
Eric Dumazet 已提交
2328
			WARN_ON_ONCE(skb_dst_is_noref(skb));
2329
			skb_mark_not_on_list(skb);
P
Peter Zijlstra 已提交
2330
			sk_backlog_rcv(sk, skb);
L
Linus Torvalds 已提交
2331

2332
			cond_resched();
L
Linus Torvalds 已提交
2333 2334 2335 2336

			skb = next;
		} while (skb != NULL);

2337 2338
		spin_lock_bh(&sk->sk_lock.slock);
	}

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
L
Linus Torvalds 已提交
2345 2346
}

2347 2348 2349 2350 2351 2352 2353
void __sk_flush_backlog(struct sock *sk)
{
	spin_lock_bh(&sk->sk_lock.slock);
	__release_sock(sk);
	spin_unlock_bh(&sk->sk_lock.slock);
}

L
Linus Torvalds 已提交
2354 2355
/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
2356 2357
 * @sk:    sock to wait on
 * @timeo: for how long
2358
 * @skb:   last skb seen on sk_receive_queue
L
Linus Torvalds 已提交
2359 2360 2361 2362 2363 2364
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
2365
int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
L
Linus Torvalds 已提交
2366
{
W
WANG Cong 已提交
2367
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
L
Linus Torvalds 已提交
2368 2369
	int rc;

W
WANG Cong 已提交
2370
	add_wait_queue(sk_sleep(sk), &wait);
2371
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
W
WANG Cong 已提交
2372
	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
2373
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
W
WANG Cong 已提交
2374
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);
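
/* Illustrative sketch: a recvmsg() implementation (called under lock_sock())
 * typically waits for data like this; error handling is omitted:
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_data(sk, &timeo, NULL);
 *	}
 */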

2379
/**
2380
 *	__sk_mem_raise_allocated - increase memory_allocated
2381 2382
 *	@sk: socket
 *	@size: memory size to allocate
2383
 *	@amt: pages to allocate
2384 2385
 *	@kind: allocation type
 *
2386
 *	Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
2387
 */
2388
int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
2389 2390
{
	struct proto *prot = sk->sk_prot;
2391
	long allocated = sk_memory_allocated_add(sk, amt);
2392
	bool charged = true;
2393

2394
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
2395
	    !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
2396
		goto suppress_allocation;
2397 2398

	/* Under limit. */
2399
	if (allocated <= sk_prot_mem_limits(sk, 0)) {
2400
		sk_leave_memory_pressure(sk);
2401 2402 2403
		return 1;
	}

2404 2405
	/* Under pressure. */
	if (allocated > sk_prot_mem_limits(sk, 1))
2406
		sk_enter_memory_pressure(sk);
2407

2408 2409
	/* Over hard limit. */
	if (allocated > sk_prot_mem_limits(sk, 2))
2410 2411 2412 2413
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
2414
		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
2415
			return 1;
2416

2417
	} else { /* SK_MEM_SEND */
2418 2419
		int wmem0 = sk_get_wmem0(sk, prot);

2420
		if (sk->sk_type == SOCK_STREAM) {
2421
			if (sk->sk_wmem_queued < wmem0)
2422
				return 1;
2423
		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
2424
				return 1;
2425
		}
2426 2427
	}

2428
	if (sk_has_memory_pressure(sk)) {
2429 2430
		int alloc;

2431
		if (!sk_under_memory_pressure(sk))
2432
			return 1;
2433 2434
		alloc = sk_sockets_allocated_read_positive(sk);
		if (sk_prot_mem_limits(sk, 2) > alloc *
2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

2453 2454
	if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
		trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
2455

2456
	sk_memory_allocated_sub(sk, amt);
2457

2458 2459
	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
2460

2461 2462
	return 0;
}
EXPORT_SYMBOL(__sk_mem_raise_allocated);

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	int ret, amt = sk_mem_pages(size);

	sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
	ret = __sk_mem_raise_allocated(sk, size, amt, kind);
	if (!ret)
		sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
	return ret;
}
EXPORT_SYMBOL(__sk_mem_schedule);
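
/* Worked example (assuming SK_MEM_QUANTUM == PAGE_SIZE == 4096): charging a
 * 1500 byte datagram through __sk_mem_schedule() does
 *
 *	amt = sk_mem_pages(1500) = 1
 *	sk_forward_alloc += 1 << SK_MEM_QUANTUM_SHIFT	(i.e. 4096 bytes)
 *
 * and the protocol then consumes 1500 of those forward-allocated bytes; the
 * remaining 2596 bytes satisfy later charges without touching
 * memory_allocated again.
 */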

/**
2488
 *	__sk_mem_reduce_allocated - reclaim memory_allocated
2489
 *	@sk: socket
2490 2491 2492
 *	@amount: number of quanta
 *
 *	Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
2493
 */
2494
void __sk_mem_reduce_allocated(struct sock *sk, int amount)
2495
{
E
Eric Dumazet 已提交
2496
	sk_memory_allocated_sub(sk, amount);
2497

2498 2499
	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
2500

2501 2502 2503
	if (sk_under_memory_pressure(sk) &&
	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
		sk_leave_memory_pressure(sk);
2504
}
2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517
EXPORT_SYMBOL(__sk_mem_reduce_allocated);

/**
 *	__sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
 */
void __sk_mem_reclaim(struct sock *sk, int amount)
{
	amount >>= SK_MEM_QUANTUM_SHIFT;
	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
	__sk_mem_reduce_allocated(sk, amount);
}
2518 2519
EXPORT_SYMBOL(__sk_mem_reclaim);

2520 2521 2522 2523 2524 2525
int sk_set_peek_off(struct sock *sk, int val)
{
	sk->sk_peek_off = val;
	return 0;
}
EXPORT_SYMBOL_GPL(sk_set_peek_off);
2526

L
Linus Torvalds 已提交
2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537
/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
E
Eric Dumazet 已提交
2538
EXPORT_SYMBOL(sock_no_bind);
L
Linus Torvalds 已提交
2539

2540
int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
L
Linus Torvalds 已提交
2541 2542 2543 2544
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
E
Eric Dumazet 已提交
2545
EXPORT_SYMBOL(sock_no_connect);
L
Linus Torvalds 已提交
2546 2547 2548 2549 2550

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
E
Eric Dumazet 已提交
2551
EXPORT_SYMBOL(sock_no_socketpair);
L
Linus Torvalds 已提交
2552

2553 2554
int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
		   bool kern)
L
Linus Torvalds 已提交
2555 2556 2557
{
	return -EOPNOTSUPP;
}
E
Eric Dumazet 已提交
2558
EXPORT_SYMBOL(sock_no_accept);
L
Linus Torvalds 已提交
2559

2560
int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2561
		    int peer)
L
Linus Torvalds 已提交
2562 2563 2564
{
	return -EOPNOTSUPP;
}
E
Eric Dumazet 已提交
2565
EXPORT_SYMBOL(sock_no_getname);
L
Linus Torvalds 已提交
2566 2567 2568 2569 2570

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
E
Eric Dumazet 已提交
2571
EXPORT_SYMBOL(sock_no_ioctl);
L
Linus Torvalds 已提交
2572 2573 2574 2575 2576

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
E
Eric Dumazet 已提交
2577
EXPORT_SYMBOL(sock_no_listen);
L
Linus Torvalds 已提交
2578 2579 2580 2581 2582

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
E
Eric Dumazet 已提交
2583
EXPORT_SYMBOL(sock_no_shutdown);
L
Linus Torvalds 已提交
2584 2585

int sock_no_setsockopt(struct socket *sock, int level, int optname,
2586
		    char __user *optval, unsigned int optlen)
L
Linus Torvalds 已提交
2587 2588 2589
{
	return -EOPNOTSUPP;
}
E
Eric Dumazet 已提交
2590
EXPORT_SYMBOL(sock_no_setsockopt);
L
Linus Torvalds 已提交
2591 2592 2593 2594 2595 2596

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
E
Eric Dumazet 已提交
2597
EXPORT_SYMBOL(sock_no_getsockopt);
L
Linus Torvalds 已提交
2598

2599
int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
L
Linus Torvalds 已提交
2600 2601 2602
{
	return -EOPNOTSUPP;
}
E
Eric Dumazet 已提交
2603
EXPORT_SYMBOL(sock_no_sendmsg);
L
Linus Torvalds 已提交
2604

2605 2606 2607 2608 2609 2610
int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg_locked);

2611 2612
int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
		    int flags)
L
Linus Torvalds 已提交
2613 2614 2615
{
	return -EOPNOTSUPP;
}
E
Eric Dumazet 已提交
2616
EXPORT_SYMBOL(sock_no_recvmsg);
L
Linus Torvalds 已提交
2617 2618 2619 2620 2621 2622

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
E
Eric Dumazet 已提交
2623
EXPORT_SYMBOL(sock_no_mmap);
L
Linus Torvalds 已提交
2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
E
Eric Dumazet 已提交
2637
EXPORT_SYMBOL(sock_no_sendpage);
L
Linus Torvalds 已提交
2638

2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654
ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
				int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage_locked);

L
Linus Torvalds 已提交
2655 2656 2657 2658 2659 2660
/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
2661 2662 2663 2664
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
H
Herbert Xu 已提交
2665
	if (skwq_has_sleeper(wq))
2666 2667
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
L
Linus Torvalds 已提交
2668 2669 2670 2671
}

static void sock_def_error_report(struct sock *sk)
{
2672 2673 2674 2675
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
H
Herbert Xu 已提交
2676
	if (skwq_has_sleeper(wq))
2677
		wake_up_interruptible_poll(&wq->wait, EPOLLERR);
2678
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2679
	rcu_read_unlock();
L
Linus Torvalds 已提交
2680 2681
}

2682
static void sock_def_readable(struct sock *sk)
L
Linus Torvalds 已提交
2683
{
2684 2685 2686 2687
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
H
Herbert Xu 已提交
2688
	if (skwq_has_sleeper(wq))
2689 2690
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
						EPOLLRDNORM | EPOLLRDBAND);
2691
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2692
	rcu_read_unlock();
L
Linus Torvalds 已提交
2693 2694 2695 2696
}

static void sock_def_write_space(struct sock *sk)
{
2697 2698 2699
	struct socket_wq *wq;

	rcu_read_lock();
L
Linus Torvalds 已提交
2700 2701 2702 2703

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
2704
	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
2705
		wq = rcu_dereference(sk->sk_wq);
H
Herbert Xu 已提交
2706
		if (skwq_has_sleeper(wq))
2707 2708
			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);
L
Linus Torvalds 已提交
2709 2710 2711

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
2712
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
L
Linus Torvalds 已提交
2713 2714
	}

2715
	rcu_read_unlock();
L
Linus Torvalds 已提交
2716 2717 2718 2719 2720 2721 2722 2723 2724 2725
}

static void sock_def_destruct(struct sock *sk)
{
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
2726
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
L
Linus Torvalds 已提交
2727
}
E
Eric Dumazet 已提交
2728
EXPORT_SYMBOL(sk_send_sigurg);
L
Linus Torvalds 已提交
2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739

void sk_reset_timer(struct sock *sk, struct timer_list* timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list* timer)
{
2740
	if (del_timer(timer))
L
Linus Torvalds 已提交
2741 2742 2743 2744 2745 2746
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);

void sock_init_data(struct socket *sock, struct sock *sk)
{
2747
	sk_init_common(sk);
L
Linus Torvalds 已提交
2748 2749
	sk->sk_send_head	=	NULL;

2750
	timer_setup(&sk->sk_timer, NULL, 0);
2751

L
Linus Torvalds 已提交
2752 2753 2754 2755
	sk->sk_allocation	=	GFP_KERNEL;
	sk->sk_rcvbuf		=	sysctl_rmem_default;
	sk->sk_sndbuf		=	sysctl_wmem_default;
	sk->sk_state		=	TCP_CLOSE;
2756
	sk_set_socket(sk, sock);
L
Linus Torvalds 已提交
2757 2758 2759

	sock_set_flag(sk, SOCK_ZAPPED);

2760
	if (sock) {
L
Linus Torvalds 已提交
2761
		sk->sk_type	=	sock->type;
2762
		sk->sk_wq	=	sock->wq;
L
Linus Torvalds 已提交
2763
		sock->sk	=	sk;
2764 2765
		sk->sk_uid	=	SOCK_INODE(sock)->i_uid;
	} else {
2766
		sk->sk_wq	=	NULL;
2767 2768
		sk->sk_uid	=	make_kuid(sock_net(sk)->user_ns, 0);
	}
L
Linus Torvalds 已提交
2769 2770

	rwlock_init(&sk->sk_callback_lock);
2771 2772 2773 2774 2775 2776 2777 2778
	if (sk->sk_kern_sock)
		lockdep_set_class_and_name(
			&sk->sk_callback_lock,
			af_kern_callback_keys + sk->sk_family,
			af_family_kern_clock_key_strings[sk->sk_family]);
	else
		lockdep_set_class_and_name(
			&sk->sk_callback_lock,
2779 2780
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);
L
Linus Torvalds 已提交
2781 2782 2783 2784 2785 2786 2787

	sk->sk_state_change	=	sock_def_wakeup;
	sk->sk_data_ready	=	sock_def_readable;
	sk->sk_write_space	=	sock_def_write_space;
	sk->sk_error_report	=	sock_def_error_report;
	sk->sk_destruct		=	sock_def_destruct;

2788 2789
	sk->sk_frag.page	=	NULL;
	sk->sk_frag.offset	=	0;
2790
	sk->sk_peek_off		=	-1;
L
Linus Torvalds 已提交
2791

2792 2793
	sk->sk_peer_pid 	=	NULL;
	sk->sk_peer_cred	=	NULL;
L
Linus Torvalds 已提交
2794 2795 2796 2797 2798
	sk->sk_write_pending	=	0;
	sk->sk_rcvlowat		=	1;
	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;

2799
	sk->sk_stamp = SK_DEFAULT_STAMP;
2800 2801 2802
#if BITS_PER_LONG==32
	seqlock_init(&sk->sk_stamp_seq);
#endif
W
Willem de Bruijn 已提交
2803
	atomic_set(&sk->sk_zckey, 0);
L
Linus Torvalds 已提交
2804

2805
#ifdef CONFIG_NET_RX_BUSY_POLL
E
Eliezer Tamir 已提交
2806
	sk->sk_napi_id		=	0;
2807
	sk->sk_ll_usec		=	sysctl_net_busy_read;
E
Eliezer Tamir 已提交
2808 2809
#endif

2810 2811
	sk->sk_max_pacing_rate = ~0UL;
	sk->sk_pacing_rate = ~0UL;
2812
	sk->sk_pacing_shift = 10;
2813
	sk->sk_incoming_cpu = -1;
2814 2815

	sk_rx_queue_clear(sk);
E
Eric Dumazet 已提交
2816 2817 2818 2819 2820
	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (Documentation/RCU/rculist_nulls.txt for details)
	 */
	smp_wmb();
2821
	refcount_set(&sk->sk_refcnt, 1);
W
Wang Chen 已提交
2822
	atomic_set(&sk->sk_drops, 0);
L
Linus Torvalds 已提交
2823
}
E
L
H
Harvey Harrison 已提交
2826
void lock_sock_nested(struct sock *sk, int subclass)
L
Linus Torvalds 已提交
2827 2828
{
	might_sleep();
I
Ingo Molnar 已提交
2829
	spin_lock_bh(&sk->sk_lock.slock);
2830
	if (sk->sk_lock.owned)
L
Linus Torvalds 已提交
2831
		__lock_sock(sk);
2832
	sk->sk_lock.owned = 1;
I
Ingo Molnar 已提交
2833 2834 2835 2836
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
2837
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
I
Ingo Molnar 已提交
2838
	local_bh_enable();
L
Linus Torvalds 已提交
2839
}
2840
EXPORT_SYMBOL(lock_sock_nested);
L
Linus Torvalds 已提交
2841

H
Harvey Harrison 已提交
2842
void release_sock(struct sock *sk)
L
Linus Torvalds 已提交
2843
{
I
Ingo Molnar 已提交
2844
	spin_lock_bh(&sk->sk_lock.slock);
L
Linus Torvalds 已提交
2845 2846
	if (sk->sk_backlog.tail)
		__release_sock(sk);
E
Eric Dumazet 已提交
2847

2848 2849 2850
	/* Warning : release_cb() might need to release sk ownership,
	 * ie call sock_release_ownership(sk) before us.
	 */
E
Eric Dumazet 已提交
2851 2852 2853
	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);

2854
	sock_release_ownership(sk);
I
Ingo Molnar 已提交
2855 2856 2857
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
L
Linus Torvalds 已提交
2858 2859 2860
}
EXPORT_SYMBOL(release_sock);

2861 2862 2863 2864 2865
/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small section, where process wont block
2866 2867
 * return false if fast path is taken:
 *
2868
 *   sk_lock.slock locked, owned = 0, BH disabled
2869 2870 2871
 *
 * return true if slow path is taken:
 *
2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896
 *   sk_lock.slock unlocked, owned = 1, BH enabled
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note : We must disable BH
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
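
/* Illustrative usage: the return value must be handed back to
 * unlock_sock_fast() so it can undo whichever path was taken:
 *
 *	bool slow = lock_sock_fast(sk);
 *	// short, non-blocking critical section
 *	unlock_sock_fast(sk, slow);
 */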

L
Linus Torvalds 已提交
2897
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
2898
{
2899
	struct timeval tv;
2900 2901

	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2902
	tv = ktime_to_timeval(sock_read_timestamp(sk));
2903
	if (tv.tv_sec == -1)
L
Linus Torvalds 已提交
2904
		return -ENOENT;
2905
	if (tv.tv_sec == 0) {
2906 2907 2908
		ktime_t kt = ktime_get_real();
		sock_write_timestamp(sk, kt);
		tv = ktime_to_timeval(kt);
2909 2910
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
2911
}
L
Linus Torvalds 已提交
2912 2913
EXPORT_SYMBOL(sock_get_timestamp);

2914 2915 2916
int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;
2917 2918

	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
2919
	ts = ktime_to_timespec(sock_read_timestamp(sk));
2920 2921 2922
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
2923 2924
		ktime_t kt = ktime_get_real();
		sock_write_timestamp(sk, kt);
2925 2926 2927 2928 2929 2930
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

2931
void sock_enable_timestamp(struct sock *sk, int flag)
2932
{
2933
	if (!sock_flag(sk, flag)) {
E
Eric Dumazet 已提交
2934 2935
		unsigned long previous_flags = sk->sk_flags;

2936 2937 2938 2939 2940 2941
		sock_set_flag(sk, flag);
		/*
		 * we just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one
		 */
2942 2943
		if (sock_needs_netstamp(sk) &&
		    !(previous_flags & SK_FLAGS_TIMESTAMP))
2944
			net_enable_timestamp();
L
Linus Torvalds 已提交
2945 2946 2947
	}
}

2948 2949 2950 2951
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
		       int level, int type)
{
	struct sock_exterr_skb *serr;
2952
	struct sk_buff *skb;
2953 2954 2955
	int copied, err;

	err = -EAGAIN;
2956
	skb = sock_dequeue_err_skb(sk);
2957 2958 2959 2960 2961 2962 2963 2964
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
2965
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
EXPORT_SYMBOL(sock_recv_errqueue);

L
Linus Torvalds 已提交
2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999
/*
 *	Get a socket option on an socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise whats the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

3000
#ifdef CONFIG_COMPAT
3001 3002
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
3003 3004 3005
{
	struct sock *sk = sock->sk;

3006
	if (sk->sk_prot->compat_getsockopt != NULL)
3007 3008
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
3009 3010 3011 3012 3013
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

3014 3015
int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			int flags)
L
Linus Torvalds 已提交
3016 3017 3018 3019 3020
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

3021
	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
L
Linus Torvalds 已提交
3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
3033
			   char __user *optval, unsigned int optlen)
L
Linus Torvalds 已提交
3034 3035 3036 3037 3038 3039 3040
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

3041
#ifdef CONFIG_COMPAT
3042
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
3043
				  char __user *optval, unsigned int optlen)
3044 3045 3046
{
	struct sock *sk = sock->sk;

3047 3048 3049
	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
3050 3051 3052 3053 3054
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

L
Linus Torvalds 已提交
3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085
void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sock_common_release is called, processes have
	 * no access to socket. But net still has.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * In this point socket cannot receive new packets, but it is possible
	 * that some packets are in flight because some CPU runs receiver and
	 * did hash table lookup before we unhashed socket. They will achieve
	 * receive queue and will be purged by socket destructor.
	 *
	 * Also we still have packets pending on receive queue and probably,
	 * our own packets waiting in device queues. sock_destroy will drain
	 * receive queue, but transmitted packets will delay socket destruction
	 * until the last reference will be released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

3086
	sk_refcnt_debug_release(sk);
3087

L
Linus Torvalds 已提交
3088 3089 3090 3091
	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106
void sk_get_meminfo(const struct sock *sk, u32 *mem)
{
	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);

	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
	mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
	mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
	mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
	mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
}

3107 3108
#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
3109 3110 3111
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};
3112 3113

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
3114 3115 3116

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
3117
	__this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
3118 3119 3120 3121 3122 3123 3124 3125 3126
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
3127
		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
3128 3129 3130 3131 3132

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149
static void sock_inuse_add(struct net *net, int val)
{
	this_cpu_add(*net->core.sock_inuse, val);
}

int sock_inuse_get(struct net *net)
{
	int cpu, res = 0;

	for_each_possible_cpu(cpu)
		res += *per_cpu_ptr(net->core.sock_inuse, cpu);

	return res;
}

EXPORT_SYMBOL_GPL(sock_inuse_get);

3150
static int __net_init sock_inuse_init_net(struct net *net)
3151
{
3152
	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164
	if (net->core.prot_inuse == NULL)
		return -ENOMEM;

	net->core.sock_inuse = alloc_percpu(int);
	if (net->core.sock_inuse == NULL)
		goto out;

	return 0;

out:
	free_percpu(net->core.prot_inuse);
	return -ENOMEM;
3165 3166
}

3167
static void __net_exit sock_inuse_exit_net(struct net *net)
3168
{
3169
	free_percpu(net->core.prot_inuse);
3170
	free_percpu(net->core.sock_inuse);
3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
3187 3188 3189 3190 3191 3192

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
J
Joe Perches 已提交
3193
		pr_err("PROTO_INUSE_NR exhausted\n");
3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
3213 3214 3215 3216

static void sock_inuse_add(struct net *net, int val)
{
}
3217 3218
#endif

3219 3220 3221 3222 3223 3224
static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
{
	if (!rsk_prot)
		return;
	kfree(rsk_prot->slab_name);
	rsk_prot->slab_name = NULL;
3225 3226
	kmem_cache_destroy(rsk_prot->slab);
	rsk_prot->slab = NULL;
3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242
}

static int req_prot_init(const struct proto *prot)
{
	struct request_sock_ops *rsk_prot = prot->rsk_prot;

	if (!rsk_prot)
		return 0;

	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
					prot->name);
	if (!rsk_prot->slab_name)
		return -ENOMEM;

	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
					   rsk_prot->obj_size, 0,
3243 3244
					   SLAB_ACCOUNT | prot->slab_flags,
					   NULL);
3245 3246 3247 3248 3249 3250 3251 3252 3253

	if (!rsk_prot->slab) {
		pr_crit("%s: Can't create request sock SLAB cache!\n",
			prot->name);
		return -ENOMEM;
	}
	return 0;
}

3254 3255
int proto_register(struct proto *prot, int alloc_slab)
{
L
Linus Torvalds 已提交
3256
	if (alloc_slab) {
3257 3258
		prot->slab = kmem_cache_create_usercopy(prot->name,
					prot->obj_size, 0,
3259 3260
					SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
					prot->slab_flags,
3261
					prot->useroffset, prot->usersize,
3262
					NULL);
L
Linus Torvalds 已提交
3263 3264

		if (prot->slab == NULL) {
J
Joe Perches 已提交
3265 3266
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
3267
			goto out;
L
Linus Torvalds 已提交
3268
		}
3269

3270 3271
		if (req_prot_init(prot))
			goto out_free_request_sock_slab;
3272

3273
		if (prot->twsk_prot != NULL) {
3274
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
3275

3276
			if (prot->twsk_prot->twsk_slab_name == NULL)
3277 3278
				goto out_free_request_sock_slab;

3279
			prot->twsk_prot->twsk_slab =
3280
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
3281
						  prot->twsk_prot->twsk_obj_size,
3282
						  0,
3283
						  SLAB_ACCOUNT |
3284
						  prot->slab_flags,
3285
						  NULL);
3286
			if (prot->twsk_prot->twsk_slab == NULL)
3287 3288
				goto out_free_timewait_sock_slab_name;
		}
L
Linus Torvalds 已提交
3289 3290
	}

3291
	mutex_lock(&proto_list_mutex);
L
Linus Torvalds 已提交
3292
	list_add(&prot->node, &proto_list);
3293
	assign_proto_idx(prot);
3294
	mutex_unlock(&proto_list_mutex);
3295 3296
	return 0;

3297
out_free_timewait_sock_slab_name:
3298
	kfree(prot->twsk_prot->twsk_slab_name);
3299
out_free_request_sock_slab:
3300 3301
	req_prot_cleanup(prot->rsk_prot);

3302 3303
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
3304 3305
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
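
/* Illustrative sketch (hypothetical names): a protocol module registers its
 * struct proto once at init time and unregisters it on exit:
 *
 *	static struct proto my_proto = {
 *		.name	  = "MYPROTO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);	// 1: allocate a kmem cache
 *	...
 *	proto_unregister(&my_proto);
 */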

void proto_unregister(struct proto *prot)
{
3311
	mutex_lock(&proto_list_mutex);
3312
	release_proto_idx(prot);
3313
	list_del(&prot->node);
3314
	mutex_unlock(&proto_list_mutex);
L
Linus Torvalds 已提交
3315

3316 3317
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
L
Linus Torvalds 已提交
3318

3319
	req_prot_cleanup(prot->rsk_prot);
3320

3321 3322
	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
3323
		kfree(prot->twsk_prot->twsk_slab_name);
3324
		prot->twsk_prot->twsk_slab = NULL;
3325
	}
L
Linus Torvalds 已提交
3326 3327 3328
}
EXPORT_SYMBOL(proto_unregister);

3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340
int sock_load_diag_module(int family, int protocol)
{
	if (!protocol) {
		if (!sock_is_registered(family))
			return -ENOENT;

		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
				      NETLINK_SOCK_DIAG, family);
	}

#ifdef CONFIG_INET
	if (family == AF_INET &&
3341
	    protocol != IPPROTO_RAW &&
3342 3343 3344 3345 3346 3347 3348 3349 3350
	    !rcu_access_pointer(inet_protos[protocol]))
		return -ENOENT;
#endif

	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			      NETLINK_SOCK_DIAG, family, protocol);
}
EXPORT_SYMBOL(sock_load_diag_module);

L
Linus Torvalds 已提交
3351 3352
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
3353
	__acquires(proto_list_mutex)
L
Linus Torvalds 已提交
3354
{
3355
	mutex_lock(&proto_list_mutex);
3356
	return seq_list_start_head(&proto_list, *pos);
L
Linus Torvalds 已提交
3357 3358 3359 3360
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
3361
	return seq_list_next(v, &proto_list, pos);
L
Linus Torvalds 已提交
3362 3363 3364
}

static void proto_seq_stop(struct seq_file *seq, void *v)
3365
	__releases(proto_list_mutex)
L
Linus Torvalds 已提交
3366
{
3367
	mutex_unlock(&proto_list_mutex);
L
Linus Torvalds 已提交
3368 3369 3370 3371 3372 3373
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}
3374 3375
static long sock_prot_memory_allocated(struct proto *proto)
{
3376
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
3377 3378 3379 3380 3381 3382 3383
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}
L
Linus Torvalds 已提交
3384 3385 3386

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
3387

E
Eric Dumazet 已提交
3388
	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
L
Linus Torvalds 已提交
3389 3390 3391
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
3392
		   sock_prot_inuse_get(seq_file_net(seq), proto),
3393 3394
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
L
Linus Torvalds 已提交
3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->sendpage),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
3421
	if (v == &proto_list)
L
Linus Torvalds 已提交
3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
	else
3433
		proto_seq_printf(seq, list_entry(v, struct proto, node));
L
Linus Torvalds 已提交
3434 3435 3436
	return 0;
}

3437
static const struct seq_operations proto_seq_ops = {
L
Linus Torvalds 已提交
3438 3439 3440 3441 3442 3443
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

3444 3445
static __net_init int proto_init_net(struct net *net)
{
3446 3447
	if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
			sizeof(struct seq_net_private)))
3448 3449 3450 3451 3452 3453 3454
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
3455
	remove_proc_entry("protocols", net->proc_net);
3456 3457 3458 3459 3460 3461
}


static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
L
Linus Torvalds 已提交
3462 3463 3464 3465
};

static int __init proto_init(void)
{
3466
	return register_pernet_subsys(&proto_net_ops);
L
Linus Torvalds 已提交
3467 3468 3469 3470 3471
}

subsys_initcall(proto_init);

#endif /* PROC_FS */
3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482

#ifdef CONFIG_NET_RX_BUSY_POLL
bool sk_busy_loop_end(void *p, unsigned long start_time)
{
	struct sock *sk = p;

	return !skb_queue_empty(&sk->sk_receive_queue) ||
	       sk_busy_loop_timeout(sk, start_time);
}
EXPORT_SYMBOL(sk_busy_loop_end);
#endif /* CONFIG_NET_RX_BUSY_POLL */