/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

/*
 * This struct holds the first and last local port number.
 */
struct local_ports sysctl_local_ports __read_mostly = {
	.lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
	.range = { 32768, 61000 },
};

unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);

void inet_get_local_port_range(int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&sysctl_local_ports.lock);

		*low = sysctl_local_ports.range[0];
		*high = sysctl_local_ports.range[1];
	} while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
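
/*
 * Informational note: the range read here is the pair exposed through the
 * net.ipv4.ip_local_port_range sysctl, e.g. after
 *
 *	sysctl -w net.ipv4.ip_local_port_range="10000 60999"
 *
 * callers see low == 10000 and high == 60999.  The seqlock retry loop
 * above guarantees the two values are read as a consistent pair even
 * while the sysctl handler is rewriting them.
 */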

int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb, bool relax)
{
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			    (sk2->sk_state != TCP_TIME_WAIT &&
			     !uid_eq(uid, sock_i_uid(sk2))))) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);

				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
		}
	}
	return node != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
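
/*
 * Informational summary of the rules implemented above: two sockets may
 * share a {device, address, port} tuple only if either (a) both set
 * SO_REUSEADDR and the existing owner is not in TCP_LISTEN state, or
 * (b) both set SO_REUSEPORT and belong to the same user (an owner in
 * TCP_TIME_WAIT is also tolerated).  When relax == false, two
 * SO_REUSEADDR sockets on the same address are still reported as a
 * conflict; inet_csk_get_port() uses that stricter mode while probing
 * for a free ephemeral port.
 */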

/* Obtain a reference to a local port for the given sock;
 * if snum is zero it means select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;
	kuid_t uid = sock_i_uid(sk);

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

again:
		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		smallest_rover = rover = net_random() % remaining + low;

		smallest_size = -1;
		do {
			if (inet_is_reserved_local_port(rover))
				goto next_nolock;
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (((tb->fastreuse > 0 &&
					      sk->sk_reuse &&
					      sk->sk_state != TCP_LISTEN) ||
					     (tb->fastreuseport > 0 &&
					      sk->sk_reuseport &&
					      uid_eq(tb->fastuid, uid))) &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
							snum = smallest_rover;
							goto tb_found;
						}
					}
					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
						snum = rover;
						goto tb_found;
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
		next_nolock:
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			goto fail;
		}
		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its mutex.
		 */
		snum = rover;
	} else {
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if (((tb->fastreuse > 0 &&
		      sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
		     (tb->fastreuseport > 0 &&
		      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
		    smallest_size == -1) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
				if (((sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
				     (tb->fastreuseport > 0 &&
				      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
				    smallest_size != -1 && --attempts >= 0) {
					spin_unlock(&head->lock);
					goto again;
				}

				goto fail_unlock;
			}
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
			tb->fastreuseport = 1;
			tb->fastuid = uid;
		} else
			tb->fastreuseport = 0;
	} else {
		if (tb->fastreuse &&
		    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
			tb->fastreuse = 0;
		if (tb->fastreuseport &&
		    (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)))
			tb->fastreuseport = 0;
	}
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
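
/*
 * Informational: connection-oriented protocols plug this in as their
 * struct proto ->get_port hook; e.g. net/ipv4/tcp_ipv4.c has, in
 * substance:
 *
 *	struct proto tcp_prot = {
 *		.name		= "TCP",
 *		...
 *		.get_port	= inet_csk_get_port,
 *	};
 */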

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct sock *newsk;
	struct request_sock *req;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non-blocking socket, don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue);
	newsk = req->sk;

	sk_acceptq_removed(sk);
	if (sk->sk_protocol == IPPROTO_TCP && queue->fastopenq != NULL) {
		spin_lock_bh(&queue->fastopenq->lock);
		if (tcp_rsk(req)->listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq->lock);
	}
out:
	release_sock(sk);
	if (req)
		__reqsk_free(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
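
/*
 * Informational: this is what backs accept(2) for TCP - inet_accept()
 * invokes sk->sk_prot->accept(), which tcp_prot points at
 * inet_csk_accept.
 */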

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expiry jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);
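
/*
 * Informational: TCP is the canonical caller; tcp_init_xmit_timers() in
 * net/ipv4/tcp_timer.c does, in substance:
 *
 *	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
 *				  &tcp_keepalive_timer);
 */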

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options_rcu *opt = inet_rsk(req)->opt;
	struct net *net = sock_net(sk);
	int flags = inet_sk_flowi_flags(sk);

	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol,
			   flags,
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct net *net = sock_net(sk);
	struct flowi4 *fl4;
	struct rtable *rt;

	fl4 = &newinet->cork.fl.u.ip4;

	rcu_read_lock();
	opt = rcu_dereference(newinet->inet_opt);
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}
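
/*
 * Note: synq_hsize is always a power of two (reqsk_queue_alloc() rounds
 * nr_table_entries up with roundup_pow_of_two()), so the mask above is a
 * cheap replacement for a modulo.
 */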

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif

struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport &&
		    ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			WARN_ON(req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= thresh &&
		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req, NULL);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) within the first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of the room for young
	 * embryos, and abort old ones without pity if they are
	 * about to clog our table.
	 */
	if (lopt->qlen>>(lopt->max_qlen_log-1)) {
		int young = (lopt->qlen_young<<1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
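
	/*
	 * Worked example with illustrative numbers: for a 256-slot table
	 * (max_qlen_log == 8) the check above fires once qlen reaches 128.
	 * With qlen == 200, qlen_young == 20 and thresh starting at 5,
	 * young runs 40 -> 80 -> 160 -> 320 while thresh drops 5 -> 4 ->
	 * 3 -> 2, so old entries are now expired after only two timeouts.
	 */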

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				int expire = 0, resend = 0;

				syn_ack_recalc(req, thresh, max_retries,
					       queue->rskq_defer_accept,
					       &expire, &resend);
				req->rsk_ops->syn_ack_timeout(parent, req);
				if (!expire &&
				    (!resend ||
				     !inet_rtx_syn_ack(parent, req) ||
				     inet_rsk(req)->acked)) {
					unsigned long timeo;

					if (req->num_timeout++ == 0)
						lopt->qlen_young--;
					timeo = min(timeout << req->num_timeout,
						    max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);

/**
 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 *	@sk: the socket to clone
 *	@req: request_sock
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
		inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
		inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
		newsk->sk_write_space = sk_stream_write_space;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If it has a non-zero inet_sk(sk)->inet_num, it must be bound. */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows a socket to be forcibly closed after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
{
	/* sk_clone_lock locked the socket and set refcnt to 2 */
	bh_unlock_sock(sk);
	sock_put(sk);

	/* The below has to be done to allow calling inet_csk_destroy_sock */
	sock_set_flag(sk, SOCK_DEAD);
	percpu_counter_inc(sk->sk_prot->orphan_count);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
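
/*
 * Informational: listen(2) reaches this function via inet_listen() in
 * net/ipv4/af_inet.c, which calls inet_csk_listen_start() when the
 * socket is not already in TCP_LISTEN state.
 */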

/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous during a synflood, but that is
	 * a bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	reqsk_queue_destroy(queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		percpu_counter_inc(sk->sk_prot->orphan_count);

		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) {
			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
			BUG_ON(sk != tcp_rsk(req)->listener);

			/* Paranoid, to prevent race condition if
			 * an inbound pkt destined for child is
			 * blocked by sock lock in tcp_v4_rcv().
			 * Also to satisfy an assertion in
			 * tcp_v4_destroy_sock().
			 */
			tcp_sk(child)->fastopen_rsk = NULL;
			sock_put(sk);
		}
		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	if (queue->fastopenq != NULL) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq->lock);
		acc_req = queue->fastopenq->rskq_rst_head;
		queue->fastopenq->rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq->lock);
		while ((req = acc_req) != NULL) {
			acc_req = req->dl_next;
			__reqsk_free(req);
		}
	}
	WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->inet_daddr;
	sin->sin_port		= inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

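	/* Note: struct rtable embeds its dst_entry as the first member, so
	 * when rt is NULL the expression &rt->dst below is NULL as well.
	 */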
	return &rt->dst;
}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);