/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>


/*
 * unhash a timewait socket from the established hash
 * the ehash chain lock must be held by the caller
 * returns 1 if @tw was unhashed (the caller must release one reference)
 */
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
	if (hlist_nulls_unhashed(&tw->tw_node))
		return 0;

	hlist_nulls_del_rcu(&tw->tw_node);
	sk_nulls_node_init(&tw->tw_node);
	return 1;
}

/*
 * unhash a timewait socket from the bind hash
 * the bind bucket lock must be held by the caller
 * returns 1 if @tw was unhashed (the caller must release one reference)
 */
int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
			  struct inet_hashinfo *hashinfo)
{
	struct inet_bind_bucket *tb = tw->tw_tb;

	if (!tb)
		return 0;

	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
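	/* inet_bind_bucket_destroy() frees the bucket only once its
	 * owners list is empty, so this is safe while other sockets
	 * still hold the port.
	 */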
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	return 1;
}

/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	int refcnt;
	/* Unlink from established hashes. */
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	spin_lock(lock);
	refcnt = inet_twsk_unhash(tw);
	spin_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];

	spin_lock(&bhead->lock);
	refcnt += inet_twsk_bind_unhash(tw, hashinfo);
	spin_unlock(&bhead->lock);

#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
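	/* Each successful unhash above returned 1; drop exactly the
	 * references that the hash tables were holding on this socket.
	 */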
	while (refcnt) {
		inet_twsk_put(tw);
		refcnt--;
	}
}

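/*
 * The last reference is gone: run the protocol destructor, release the
 * netns and module references and return the bucket to its slab cache.
 */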
static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
{
	struct module *owner = tw->tw_prot->owner;
	twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
	pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
	release_net(twsk_net(tw));
	kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
	module_put(owner);
}

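/* Release one reference; the timewait socket is freed once the last
 * reference is dropped.
 */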
void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt))
		inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;
	/* Step 1: Put TW into bind hash. Original socket stays there too.
	 * Note that any socket with inet->num != 0 MUST be bound in
	 * the bind hash, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	spin_lock(lock);

	/*
	 * Step 2: Hash TW into TIMEWAIT chain.
	 * Should be done before removing sk from established chain
	 * because readers are lockless and search established first.
	 */
	inet_twsk_add_node_rcu(tw, &ehead->twchain);

	/* Step 3: Remove SK from established hash. */
	if (__sk_nulls_del_node_init_rcu(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	/*
	 * Notes :
	 * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
	 * - We add one reference for the bhash link
	 * - We add one reference for the ehash link
	 * - We want this refcnt update done before allowing other
	 *   threads to find this tw in ehash chain.
	 */
	atomic_add(1 + 1 + 1, &tw->tw_refcnt);

	spin_unlock(lock);
}

EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);

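/*
 * Allocate a TIME_WAIT bucket mirroring @sk.  A minimal sketch of the
 * usual calling sequence (cf. tcp_time_wait() in net/ipv4/tcp_minisocks.c,
 * details elided):
 *
 *	tw = inet_twsk_alloc(sk, state);
 *	if (tw != NULL) {
 *		... copy protocol private state into tw ...
 *		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
 *		inet_twsk_schedule(tw, &tcp_death_row, timeo, TCP_TIMEWAIT_LEN);
 *		inet_twsk_put(tw);	(drop the reference hashdance gave us)
 *	}
 */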
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		kmemcheck_annotate_bitfield(tw, flags);

		/* Give us an identity. */
		tw->tw_daddr	    = inet->inet_daddr;
		tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num	    = inet->inet_num;
		tw->tw_state	    = TCP_TIME_WAIT;
		tw->tw_substate	    = state;
		tw->tw_sport	    = inet->inet_sport;
		tw->tw_dport	    = inet->inet_dport;
		tw->tw_family	    = sk->sk_family;
		tw->tw_reuse	    = sk->sk_reuse;
		tw->tw_hash	    = sk->sk_hash;
		tw->tw_ipv6only	    = 0;
		tw->tw_transparent  = inet->transparent;
		tw->tw_prot	    = sk->sk_prot_creator;
		twsk_net_set(tw, hold_net(sock_net(sk)));
		/*
		 * Because we use RCU lookups, we should not set tw_refcnt
		 * to a non-null value before everything is set up for this
		 * timewait socket.
		 */
		atomic_set(&tw->tw_refcnt, 0);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}

EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* Returns non-zero if quota exceeded.  */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version, where the lock
	 * was released after detaching the chain. It was racy, because
	 * tw buckets are scheduled in a non-serialized context in 2.3
	 * (with netfilter), and it is common with softnet, because soft
	 * irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
	return ret;
}

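/*
 * Slow timer handler: runs every twdr->period, reaps the expired
 * TIME_WAIT sockets in the current slot and, when the per-run quota
 * is exceeded, defers the rest of the slot to the twkill work queue.
 */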
void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (twdr->tw_count)
			need_timer = 1;
		twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	}
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_hangman);

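/*
 * Work queue handler: finishes reaping the slots that inet_twdr_hangman()
 * could not complete within its quota.
 */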
void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}

EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}

EXPORT_SYMBOL(inet_twsk_deschedule);

void inet_twsk_schedule(struct inet_timewait_sock *tw,
		       struct inet_timewait_death_row *twdr,
		       const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if a FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (the probability of such an
	 * event is p^(N+1), where p is the probability of losing a single
	 * packet), the time to detect the loss is about RTO*(2^N - 1) with
	 * exponential backoff. The normal timewait length is calculated so
	 * that we wait at least for one retransmitted FIN (maximal RTO is
	 * 120 sec).
	 * [ BTW Linux, following BSD, violates this requirement by waiting
	 *   only for 60 sec; we should wait at least 240 secs.
	 *   Well, 240 consumes too many resources 8)
	 * ]
	 * This interval is not reduced, in order to catch old duplicates
	 * and responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect old duplicates, we can reduce
	 * the interval to the bounds required by RTO rather than MSL. So,
	 * if the peer understands PAWS, we kill the tw bucket after 3.5*RTO
	 * (it is important that this number is greater than the TS tick!)
	 * and detect old duplicates with the help of PAWS.
	 */
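	/* Round the timeout up to a whole number of recycle ticks. */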
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

	spin_lock(&twdr->death_lock);

	/* Unlink it if it was already scheduled; otherwise this is the
	 * first scheduling and the death row takes its own reference.
	 */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					      (slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twsk_schedule);

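/*
 * Fine-grained (recycle) timer handler: walks the twcal wheel, kills
 * every timewait socket whose deadline has passed and re-arms the timer
 * for the next slot that still has pending entries.
 */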
void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
				NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);

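/*
 * Release the remaining TIME_WAIT sockets of a given family whose
 * network namespace is being dismantled (its refcount reached zero).
 */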
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
		     struct inet_timewait_death_row *twdr, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_nulls_node *node;
	unsigned int slot;

	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
		rcu_read_lock();
restart:
		sk_nulls_for_each_rcu(sk, node, &head->twchain) {
			tw = inet_twsk(sk);
			if ((tw->tw_family != family) ||
				atomic_read(&twsk_net(tw)->count))
				continue;

			if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
				continue;

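			/* Recheck with the reference held: the timewait
			 * socket may have been reused for another connection
			 * while we were taking the reference.
			 */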
			if (unlikely((tw->tw_family != family) ||
				     atomic_read(&twsk_net(tw)->count))) {
				inet_twsk_put(tw);
				goto restart;
			}

			rcu_read_unlock();
			inet_twsk_deschedule(tw, twdr);
			inet_twsk_put(tw);
			goto restart_rcu;
		}
		/* If the nulls value we got at the end of this lookup is
		 * not the expected one, we must restart the lookup. We
		 * probably met an item that was moved to another chain.
		 */
		if (get_nulls_value(node) != slot)
			goto restart;
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);