inet_connection_sock.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or(at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

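/* Snapshot the local ephemeral port range for @net.  The read loops on
 * the ip_local_ports seqlock, so a concurrent sysctl update cannot hand
 * back a torn low/high pair.
 */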
void inet_get_local_port_range(struct net *net, int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);

		*low = net->ipv4.ip_local_ports.range[0];
		*high = net->ipv4.ip_local_ports.range[1];
	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);

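/* Walk the owners of a bind bucket and decide whether @sk may share the
 * port.  A conflict depends on SO_REUSEADDR/SO_REUSEPORT, the listen
 * state of the existing owners, the bound device and the bound local
 * address; with @relax false the reuse rules are applied more strictly.
 * Returns non-zero if binding would conflict.
 */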
int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb, bool relax)
{
	struct sock *sk2;
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			    (sk2->sk_state != TCP_TIME_WAIT &&
			     !uid_eq(uid, sock_i_uid(sk2))))) {

				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {

				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
					break;
			}
		}
	}
	return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;
	kuid_t uid = sock_i_uid(sk);
	int attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

again:
		inet_get_local_port_range(net, &low, &high);
		if (attempt_half) {
			int half = low + ((high - low) >> 1);

			if (attempt_half == 1)
				high = half;
			else
				low = half;
		}
		remaining = (high - low) + 1;
		smallest_rover = rover = prandom_u32() % remaining + low;

		smallest_size = -1;
		do {
			if (inet_is_local_reserved_port(net, rover))
				goto next_nolock;
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (((tb->fastreuse > 0 &&
					      sk->sk_reuse &&
					      sk->sk_state != TCP_LISTEN) ||
					     (tb->fastreuseport > 0 &&
					      sk->sk_reuseport &&
					      uid_eq(tb->fastuid, uid))) &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
					}
					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
						snum = rover;
						goto tb_found;
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
		next_nolock:
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			if (attempt_half == 1) {
				/* OK we now try the upper half of the range */
				attempt_half = 2;
				goto again;
			}
			goto fail;
		}
		/* OK, here is the one we will use.  HEAD is
	 * non-NULL and we hold its lock.
		 */
		snum = rover;
	} else {
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if (((tb->fastreuse > 0 &&
		      sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
		     (tb->fastreuseport > 0 &&
		      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
		    smallest_size == -1) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
				if (((sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
				     (tb->fastreuseport > 0 &&
				      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
				    smallest_size != -1 && --attempts >= 0) {
					spin_unlock(&head->lock);
					goto again;
				}

				goto fail_unlock;
			}
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			tb->fastreuseport = 1;
			tb->fastuid = uid;
		} else
			tb->fastreuseport = 0;
	} else {
		if (tb->fastreuse &&
		    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
			tb->fastreuse = 0;
		if (tb->fastreuseport &&
		    (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)))
			tb->fastreuseport = 0;
	}
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue);
	newsk = req->sk;

	sk_acceptq_removed(sk);
	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener) {
		spin_lock_bh(&queue->fastopenq.lock);
		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq.lock);
	}
out:
	release_sock(sk);
	if (req)
		reqsk_put(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * Using different timers for retransmit, delayed acks and probes
 * We may wish to use just one timer maintaining a list of expiry jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

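/* Stop the retransmit, delayed-ACK and keepalive timers and clear the
 * corresponding pending flags.
 */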
void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

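/* Build the flow for the SYN-ACK reply from listener @sk and pending
 * request @req, then look up an output route.  Returns the dst entry on
 * success, or NULL (bumping OUTNOROUTES) when no usable route exists.
 */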
struct dst_entry *inet_csk_route_req(const struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt = ireq->opt;
	struct rtable *rt;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num));
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

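/* Like inet_csk_route_req(), but for the freshly cloned child socket:
 * the flow is stored in the child's inet cork and the child's IP options
 * are read under RCU.
 */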
struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	fl4 = &newinet->cork.fl.u.ip4;

	rcu_read_lock();
	opt = rcu_dereference(newinet->inet_opt);
	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num));
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

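/* Hash a remote address/port pair with the per-listener random seed.
 * @synq_hsize is a power of two, so the mask keeps the result inside the
 * SYN table.
 */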
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) true
#endif

/* Note: this is temporary:
 * the req sock will no longer be in the listener hash table.
 */
struct request_sock *inet_csk_search_req(struct sock *sk,
					 const __be16 rport,
					 const __be32 raddr,
					 const __be32 laddr)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req;
	u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
				  lopt->nr_table_entries);

	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->ir_rmt_port == rport &&
		    ireq->ir_rmt_addr == raddr &&
		    ireq->ir_loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			atomic_inc(&req->rsk_refcnt);
			WARN_ON(req->sk);
			break;
		}
	}
	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);

	return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);

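/* Hash a new request sock into the listener's SYN table and account for
 * it in the accept-queue counters.
 */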
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
				     inet_rsk(req)->ir_rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;


/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= thresh &&
		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

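/* Retransmit the SYN-ACK for @req through the request_sock ops and count
 * the retransmission on success.
 */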
int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

/* return true if req was found in the syn_table[] */
static bool reqsk_queue_unlink(struct request_sock_queue *queue,
			       struct request_sock *req)
{
	struct listen_sock *lopt = queue->listen_opt;
	struct request_sock **prev;
	bool found = false;

	spin_lock(&queue->syn_wait_lock);

	for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
	     prev = &(*prev)->dl_next) {
		if (*prev == req) {
			*prev = req->dl_next;
			found = true;
			break;
		}
	}

	spin_unlock(&queue->syn_wait_lock);
	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
		reqsk_put(req);
	return found;
}

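/* Remove @req from the listener's SYN table; if it was still linked,
 * drop the queue accounting and the table's reference as well.
 */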
void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

static void reqsk_timer_handler(unsigned long data)
{
	struct request_sock *req = (struct request_sock *)data;
	struct sock *sk_listener = req->rsk_listener;
	struct inet_connection_sock *icsk = inet_csk(sk_listener);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int qlen, expire = 0, resend = 0;
	int max_retries, thresh;
	u8 defer_accept;

	if (sk_listener->sk_state != TCP_LISTEN || !lopt) {
		reqsk_put(req);
		return;
	}

	max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	thresh = max_retries;
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of the room for young
	 * embryos; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	qlen = listen_sock_qlen(lopt);
	if (qlen >> (lopt->max_qlen_log - 1)) {
		int young = listen_sock_young(lopt) << 1;

		while (thresh > 2) {
			if (qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
	defer_accept = READ_ONCE(queue->rskq_defer_accept);
	if (defer_accept)
		max_retries = defer_accept;
	syn_ack_recalc(req, thresh, max_retries, defer_accept,
		       &expire, &resend);
	req->rsk_ops->syn_ack_timeout(req);
	if (!expire &&
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		unsigned long timeo;

		if (req->num_timeout++ == 0)
			atomic_inc(&lopt->young_dec);
		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
		mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
		return;
	}
	inet_csk_reqsk_queue_drop(sk_listener, req);
	reqsk_put(req);
}

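/* Arm the per-request timer and publish @req at the head of its hash
 * chain under syn_wait_lock; the refcount is initialised to 2 before
 * the request becomes visible to lookups.
 */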
void reqsk_queue_hash_req(struct request_sock_queue *queue,
			  u32 hash, struct request_sock *req,
			  unsigned long timeout)
{
	struct listen_sock *lopt = queue->listen_opt;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
	req->rsk_hash = hash;

	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */
	smp_wmb();
	atomic_set(&req->rsk_refcnt, 2);

	spin_lock(&queue->syn_wait_lock);
	req->dl_next = lopt->syn_table[hash];
	lopt->syn_table[hash] = req;
	spin_unlock(&queue->syn_wait_lock);
}
EXPORT_SYMBOL(reqsk_queue_hash_req);

/**
 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 *	@sk: the socket to clone
 *	@req: request_sock
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
		newsk->sk_write_space = sk_stream_write_space;

		newsk->sk_mark = inet_rsk(req)->ir_mark;
		atomic64_set(&newsk->sk_cookie,
			     atomic64_read(&inet_rsk(req)->ir_cookie));

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If it has a non-zero inet_sk(sk)->inet_num, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows forcing the closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);

	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	percpu_counter_inc(sk->sk_prot->orphan_count);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

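/* Move @sk to TCP_LISTEN: allocate the accept/SYN queues, reset the
 * backlog counters and (re)acquire the local port.  If get_port() fails,
 * the socket is put back to TCP_CLOSE and -EADDRINUSE is returned.
 */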
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters to hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *acc_req;
	struct request_sock *req;

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	reqsk_queue_destroy(queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		percpu_counter_inc(sk->sk_prot->orphan_count);

		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
			BUG_ON(sk != req->rsk_listener);

			/* Paranoid, to prevent race condition if
			 * an inbound pkt destined for child is
			 * blocked by sock lock in tcp_v4_rcv().
			 * Also to satisfy an assertion in
			 * tcp_v4_destroy_sock().
			 */
			tcp_sk(child)->fastopen_rsk = NULL;
		}
		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		reqsk_put(req);
	}
	if (queue->fastopenq.rskq_rst_head) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq.lock);
		acc_req = queue->fastopenq.rskq_rst_head;
		queue->fastopenq.rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq.lock);
		while ((req = acc_req) != NULL) {
			acc_req = req->dl_next;
			reqsk_put(req);
		}
	}
	WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

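/* Fill @uaddr with the IPv4 address and port of the connected peer. */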
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->inet_daddr;
	sin->sin_port		= inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif

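/* Rebuild an output route from the socket's cached flow, honouring any
 * source-routing option, and install it with sk_setup_caps() on success.
 */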
static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}

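/* Propagate a new path MTU to the socket's cached route, rebuilding the
 * route first (and again afterwards) if the cached dst has been
 * invalidated.
 */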
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);