/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or(at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

void inet_get_local_port_range(struct net *net, int *low, int *high)
{
	unsigned int seq;

	do {
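		/* The local port range sysctl is guarded by a seqlock;
		 * retry until we read a consistent low/high pair.
		 */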
		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);

		*low = net->ipv4.ip_local_ports.range[0];
		*high = net->ipv4.ip_local_ports.range[1];
	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);

int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb, bool relax)
{
	struct sock *sk2;
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
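			/* Same (or wildcard) device: decide whether
			 * SO_REUSEADDR/SO_REUSEPORT semantics allow sharing
			 * before comparing the bound addresses.
			 */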
			if ((!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			    (sk2->sk_state != TCP_TIME_WAIT &&
			     !uid_eq(uid, sock_i_uid(sk2))))) {

				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {

				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
					break;
			}
		}
	}
	return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);

/* Obtain a reference to a local port for the given sock;
 * if snum is zero, select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;
	kuid_t uid = sock_i_uid(sk);

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

again:
		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;
		smallest_rover = rover = prandom_u32() % remaining + low;

		smallest_size = -1;
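		/* Walk the range from a random starting point, remembering
		 * the reusable bucket with the fewest owners as a fallback
		 * in case no conflict-free port turns up.
		 */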
		do {
			if (inet_is_local_reserved_port(net, rover))
				goto next_nolock;
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (((tb->fastreuse > 0 &&
					      sk->sk_reuse &&
					      sk->sk_state != TCP_LISTEN) ||
					     (tb->fastreuseport > 0 &&
					      sk->sk_reuseport &&
					      uid_eq(tb->fastuid, uid))) &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
					}
					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
						snum = rover;
						goto tb_found;
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
		next_nolock:
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			goto fail;
		}
		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its lock.
		 */
		snum = rover;
	} else {
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if (((tb->fastreuse > 0 &&
		      sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
		     (tb->fastreuseport > 0 &&
		      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
		    smallest_size == -1) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
				if (((sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
				     (tb->fastreuseport > 0 &&
				      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
				    smallest_size != -1 && --attempts >= 0) {
					spin_unlock(&head->lock);
					goto again;
				}

				goto fail_unlock;
			}
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			tb->fastreuseport = 1;
			tb->fastuid = uid;
		} else
			tb->fastreuseport = 0;
	} else {
		if (tb->fastreuse &&
		    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
			tb->fastreuse = 0;
		if (tb->fastreuseport &&
		    (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)))
			tb->fastreuseport = 0;
	}
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
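		/* Annotate the sleep: lock_sock() below may block while we
		 * are nominally still TASK_INTERRUPTIBLE.
		 */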
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non-blocking socket, don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue);
	newsk = req->sk;

	sk_acceptq_removed(sk);
	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener &&
	    queue->fastopenq) {
		spin_lock_bh(&queue->fastopenq->lock);
		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq->lock);
	}
out:
	release_sock(sk);
	if (req)
		reqsk_put(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

/*
 * Using different timers for retransmit, delayed acks and probes
 * We may wish to use just one timer maintaining a list of expiry jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt = ireq->opt;
	struct rtable *rt;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num));
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	fl4 = &newinet->cork.fl.u.ip4;

	rcu_read_lock();
	opt = rcu_dereference(newinet->inet_opt);
	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num));
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

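/* Hash the (remote address, remote port) pair into the listener's SYN
 * table; the table size is a power of two, so the mask keeps the index
 * in range.
 */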
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) true
#endif

/* Note: this is temporary:
 * the req sock will no longer be in the listener hash table.
 */
struct request_sock *inet_csk_search_req(struct sock *sk,
					 const __be16 rport,
					 const __be32 raddr,
					 const __be32 laddr)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req;
	u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
				  lopt->nr_table_entries);

	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->ir_rmt_port == rport &&
		    ireq->ir_rmt_addr == raddr &&
		    ireq->ir_loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			atomic_inc(&req->rsk_refcnt);
			WARN_ON(req->sk);
			break;
		}
	}
	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);

	return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
				     inet_rsk(req)->ir_rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= thresh &&
		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

/* return true if req was found in the syn_table[] */
static bool reqsk_queue_unlink(struct request_sock_queue *queue,
			       struct request_sock *req)
{
	struct listen_sock *lopt = queue->listen_opt;
	struct request_sock **prev;
	bool found = false;

	spin_lock(&queue->syn_wait_lock);

	for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
	     prev = &(*prev)->dl_next) {
		if (*prev == req) {
			*prev = req->dl_next;
			found = true;
			break;
		}
	}

	spin_unlock(&queue->syn_wait_lock);
	if (del_timer(&req->rsk_timer))
		reqsk_put(req);
	return found;
}

void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

static void reqsk_timer_handler(unsigned long data)
{
	struct request_sock *req = (struct request_sock *)data;
	struct sock *sk_listener = req->rsk_listener;
	struct inet_connection_sock *icsk = inet_csk(sk_listener);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int qlen, expire = 0, resend = 0;
	int max_retries, thresh;
	u8 defer_accept;

	if (sk_listener->sk_state != TCP_LISTEN || !lopt) {
		reqsk_put(req);
		return;
	}

	max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	thresh = max_retries;
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to an established socket) within the first timeout.
	 * If the SYN-ACK was not acknowledged for 1 second, it means one of
	 * the following: the SYN-ACK was lost, the ACK was lost, the RTT is
	 * high, or nobody planned to ack (i.e. synflood).
	 * When the server is somewhat loaded, the queue is populated with old
	 * open requests, reducing the effective size of the queue.
	 * When the server is well loaded, the queue size drops to zero
	 * after several minutes of work. That is not a synflood, it is
	 * normal operation. The solution is to prune entries that are too
	 * old, overriding the normal timeout, when the situation becomes
	 * dangerous.
	 *
	 * Essentially, we reserve half of the room for young
	 * embryos and abort old ones without pity if they
	 * are about to clog our table.
	 */
	qlen = listen_sock_qlen(lopt);
	if (qlen >> (lopt->max_qlen_log - 1)) {
		int young = listen_sock_young(lopt) << 1;

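		/* The queue is at least half full: lower the retry threshold
		 * one step for each doubling by which old entries outnumber
		 * young ones.
		 */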
		while (thresh > 2) {
			if (qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
	defer_accept = READ_ONCE(queue->rskq_defer_accept);
	if (defer_accept)
		max_retries = defer_accept;
	syn_ack_recalc(req, thresh, max_retries, defer_accept,
		       &expire, &resend);
	req->rsk_ops->syn_ack_timeout(req);
	if (!expire &&
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		unsigned long timeo;

		if (req->num_timeout++ == 0)
			atomic_inc(&lopt->young_dec);
		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
		mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
		return;
	}
	inet_csk_reqsk_queue_drop(sk_listener, req);
	reqsk_put(req);
}

void reqsk_queue_hash_req(struct request_sock_queue *queue,
			  u32 hash, struct request_sock *req,
			  unsigned long timeout)
{
	struct listen_sock *lopt = queue->listen_opt;

	req->num_retrans = 0;
	req->num_timeout = 0;
	req->sk = NULL;

	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */
	smp_wmb();
	atomic_set(&req->rsk_refcnt, 2);
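	/* Two references: one for the SYN table, one for the timer. */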
	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
	req->rsk_hash = hash;

	spin_lock(&queue->syn_wait_lock);
	req->dl_next = lopt->syn_table[hash];
	lopt->syn_table[hash] = req;
	spin_unlock(&queue->syn_wait_lock);

	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
}
EXPORT_SYMBOL(reqsk_queue_hash_req);

/**
 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 *	@sk: the socket to clone
 *	@req: request_sock
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
		newsk->sk_write_space = sk_stream_write_space;

		newsk->sk_mark = inet_rsk(req)->ir_mark;
		atomic64_set(&newsk->sk_cookie,
			     atomic64_read(&inet_rsk(req)->ir_cookie));

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If inet_sk(sk)->inet_num is non-zero, the socket must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows to force a closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);

	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	percpu_counter_inc(sk->sk_prot->orphan_count);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *acc_req;
	struct request_sock *req;

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * a bad justification for our negligence 8)
	 * To be honest, we are not able to implement either
	 * of the variants now.			--ANK
	 */
	reqsk_queue_destroy(queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		percpu_counter_inc(sk->sk_prot->orphan_count);

		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
			BUG_ON(sk != req->rsk_listener);

			/* Paranoid, to prevent race condition if
			 * an inbound pkt destined for child is
			 * blocked by sock lock in tcp_v4_rcv().
			 * Also to satisfy an assertion in
			 * tcp_v4_destroy_sock().
			 */
			tcp_sk(child)->fastopen_rsk = NULL;
		}
		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		reqsk_put(req);
	}
	if (queue->fastopenq) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq->lock);
		acc_req = queue->fastopenq->rskq_rst_head;
		queue->fastopenq->rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq->lock);
		while ((req = acc_req) != NULL) {
			acc_req = req->dl_next;
			reqsk_put(req);
		}
	}
	WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->inet_daddr;
	sin->sin_port		= inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

	return &rt->dst;
}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
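	/* The PMTU update may invalidate the cached route, so re-check
	 * and rebuild it afterwards.
	 */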
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);