/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>

/**
 *	inet_twsk_unhash - unhash a timewait socket from established hash
 *	@tw: timewait socket
 *
 *	unhash a timewait socket from established hash, if hashed.
 *	ehash lock must be held by caller.
 *	Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
	if (hlist_nulls_unhashed(&tw->tw_node))
		return 0;

	hlist_nulls_del_rcu(&tw->tw_node);
	sk_nulls_node_init(&tw->tw_node);
	/*
	 * We cannot call inet_twsk_put() ourselves while holding the lock;
	 * the caller must call it for us.
	 */
	return 1;
}

/**
 *	inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 *	@tw: timewait socket
 *	@hashinfo: hashinfo pointer
 *
 *	unhash a timewait socket from bind hash, if hashed.
 *	bind hash lock must be held by caller.
 *	Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
			  struct inet_hashinfo *hashinfo)
{
	struct inet_bind_bucket *tb = tw->tw_tb;

	if (!tb)
		return 0;

	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	/*
	 * We cannot call inet_twsk_put() ourselves while holding the lock;
	 * the caller must call it for us.
	 */
	return 1;
}

/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	int refcnt;
	/* Unlink from established hashes. */
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	spin_lock(lock);
	refcnt = inet_twsk_unhash(tw);
	spin_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];

	spin_lock(&bhead->lock);
	refcnt += inet_twsk_bind_unhash(tw, hashinfo);
	spin_unlock(&bhead->lock);

	/* Drop the references the unhash helpers transferred to us,
	 * now that both locks have been released.
	 */
	BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
	atomic_sub(refcnt, &tw->tw_refcnt);
}

void inet_twsk_free(struct inet_timewait_sock *tw)
{
	struct module *owner = tw->tw_prot->owner;
	twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
	pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
	kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
	module_put(owner);
}

void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt))
		inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
				   struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&tw->tw_node, list);
}

static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
				    struct hlist_head *list)
{
	hlist_add_head(&tw->tw_bind_node, list);
}

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;
	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet->inet_num != 0 MUST be bound in
	   the binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	spin_lock(lock);

	/*
	 * Step 2: Hash TW into tcp ehash chain.
	 * Notes :
	 * - tw_refcnt is set to 3 because :
	 * - We have one reference from the bhash chain.
	 * - We have one reference from the ehash chain.
	 * - The third reference belongs to the caller, which typically
	 *   drops it with inet_twsk_put() once the socket has been handed
	 *   to the death row via inet_twsk_schedule().
	 * We can use atomic_set() because prior spin_lock()/spin_unlock()
	 * committed into memory all tw fields.
	 */
	atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
	inet_twsk_add_node_rcu(tw, &ehead->chain);

	/* Step 3: Remove SK from hash chain */
	if (__sk_nulls_del_node_init_rcu(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		kmemcheck_annotate_bitfield(tw, flags);

		/* Give us an identity. */
		tw->tw_daddr	    = inet->inet_daddr;
		tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_tos	    = inet->tos;
		tw->tw_num	    = inet->inet_num;
		tw->tw_state	    = TCP_TIME_WAIT;
		tw->tw_substate	    = state;
		tw->tw_sport	    = inet->inet_sport;
		tw->tw_dport	    = inet->inet_dport;
		tw->tw_family	    = sk->sk_family;
		tw->tw_reuse	    = sk->sk_reuse;
		tw->tw_hash	    = sk->sk_hash;
		tw->tw_ipv6only	    = 0;
		tw->tw_transparent  = inet->transparent;
		tw->tw_prot	    = sk->sk_prot_creator;
		atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
		twsk_net_set(tw, sock_net(sk));
		/*
		 * Because we use RCU lookups, we should not set tw_refcnt
		 * to a non-null value before everything is set up for this
		 * timewait socket.
		 */
		atomic_set(&tw->tw_refcnt, 0);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);
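
/*
 * Lifecycle sketch: how a protocol typically drives these helpers
 * (modelled on TCP's tcp_time_wait(); the surrounding code and exact
 * arguments are assumptions, not code from this file):
 *
 *	tw = inet_twsk_alloc(sk, state);		  refcnt == 0
 *	if (tw != NULL) {
 *		... copy protocol-private state into tw ...
 *		__inet_twsk_hashdance(tw, sk, hashinfo);  refcnt == 3
 *		inet_twsk_schedule(tw, twdr, timeo,
 *				   timewait_len);	  refcnt == 4
 *		inet_twsk_put(tw);			  refcnt == 3
 *	}
 *
 * The three remaining references are held by the bhash chain, the
 * ehash chain and the death row; __inet_twsk_kill() drops the first
 * two and the reaper's inet_twsk_put() drops the last.
 */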

/* Returns non-zero if quota exceeded.  */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. It was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
	return ret;
}

void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (twdr->tw_count)
			need_timer = 1;
		twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	}
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_hangman);

void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);
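
/*
 * A death row is declared by the protocol. A minimal sketch, modelled
 * on tcp_death_row in net/ipv4/tcp_minisocks.c (the field values are
 * illustrative, not taken from this file):
 *
 *	struct inet_timewait_death_row tcp_death_row = {
 *		.sysctl_max_tw_buckets = NR_FILE * 2,
 *		.period	     = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
 *		.death_lock  = __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
 *		.hashinfo    = &tcp_hashinfo,
 *		.tw_timer    = TIMER_INITIALIZER(inet_twdr_hangman, 0,
 *					(unsigned long)&tcp_death_row),
 *		.twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work,
 *					inet_twdr_twkill_work),
 *		.twcal_hand  = -1,
 *		.twcal_timer = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
 *					(unsigned long)&tcp_death_row),
 *	};
 *
 * With TCP_TIMEWAIT_LEN of 60 * HZ and INET_TWDR_TWKILL_SLOTS of 8,
 * the slow timer then visits one of the eight kill slots every 7.5
 * seconds.
 */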

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}
EXPORT_SYMBOL(inet_twsk_deschedule);
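
/*
 * Example early-kill call site (an assumption, mirroring TCP's
 * tcp_timewait_state_process() when an incoming segment tells us the
 * TIME_WAIT bucket must die at once):
 *
 *	inet_twsk_deschedule(tw, &tcp_death_row);
 *	inet_twsk_put(tw);
 */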

void inet_twsk_schedule(struct inet_timewait_sock *tw,
		       struct inet_timewait_death_row *twdr,
		       const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if the FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost, the probability of such an
	 * event is p^(N+1), where p is the probability of losing a single
	 * packet, and the time to detect the loss is about RTO*(2^N - 1)
	 * with exponential backoff. The normal timewait length is calculated
	 * so that we wait at least for one retransmitted FIN (the maximal
	 * RTO is 120 sec).
	 * [ BTW Linux, following BSD, violates this requirement by waiting
	 *   only 60 sec; we should wait at least 240 secs.
	 *   Well, 240 consumes too many resources 8)
	 * ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect old duplicates, we can reduce
	 * the interval to the bounds required by RTO, rather than MSL. So,
	 * if the peer understands PAWS, we kill the tw bucket after 3.5*RTO
	 * (it is important that this number is greater than the TS tick!)
	 * and detect old duplicates with the help of PAWS.
	 */
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;
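
	/*
	 * Worked example (values assumed from this era's
	 * inet_timewait_sock.h, not defined in this file): with HZ = 1000
	 * and INET_TWDR_RECYCLE_SLOTS_LOG = 5, INET_TWDR_RECYCLE_TICK is 7,
	 * so one recycle slot covers 1 << 7 = 128 jiffies. A timeo of
	 * 3.5 * RTO with RTO = 200 ms gives 700 jiffies, hence
	 * slot = (700 + 127) >> 7 = 6, which lands in the fine-grained
	 * twcal_row below. Only when the rounded-up slot reaches
	 * INET_TWDR_RECYCLE_SLOTS (32) do we fall back to the slow timer.
	 */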

	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = inet_tw_time_stamp() + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = inet_tw_time_stamp() + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					      (slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);

void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
				NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);

void inet_twsk_purge(struct inet_hashinfo *hashinfo,
		     struct inet_timewait_death_row *twdr, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_nulls_node *node;
	unsigned int slot;

	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
		rcu_read_lock();
restart:
		sk_nulls_for_each_rcu(sk, node, &head->chain) {
			if (sk->sk_state != TCP_TIME_WAIT)
				continue;
			tw = inet_twsk(sk);
			if ((tw->tw_family != family) ||
				atomic_read(&twsk_net(tw)->count))
				continue;

			if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
				continue;

			if (unlikely((tw->tw_family != family) ||
				     atomic_read(&twsk_net(tw)->count))) {
				inet_twsk_put(tw);
				goto restart;
			}

			rcu_read_unlock();
			local_bh_disable();
			inet_twsk_deschedule(tw, twdr);
			local_bh_enable();
			inet_twsk_put(tw);
			goto restart_rcu;
		}
		/* If the nulls value we got at the end of this lookup is
		 * not the expected one, we must restart lookup.
		 * We probably met an item that was moved to another chain.
		 */
		if (get_nulls_value(node) != slot)
			goto restart;
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);
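
/*
 * Usage note: the purge walk above is driven from namespace teardown.
 * A sketch of the assumed call site (per tcp_sk_exit_batch() of this
 * era in net/ipv4/tcp_ipv4.c):
 *
 *	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
 *
 * Only timewait sockets whose netns refcount has dropped to zero pass
 * the twsk_net(tw)->count check above and get descheduled.
 */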