/*
 *  net/dccp/ccids/ccid2.c
 *
 *  Copyright (c) 2005, 2006 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
 *
 *  Changes to meet Linux coding standards, and DCCP infrastructure fixes.
 *
 *  Copyright (c) 2006 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * This implementation should follow RFC 4341
 */
#include "../feat.h"
#include "../ccid.h"
#include "../dccp.h"
#include "ccid2.h"


#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
static int ccid2_debug;
#define ccid2_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid2_debug, format, ##a)
#else
#define ccid2_pr_debug(format, a...)
#endif

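/*
 * Packet history: one ccid2_seq entry is kept per packet in flight. Entries
 * are allocated in arrays of CCID2_SEQBUF_LEN slots which are chained into a
 * circular doubly-linked list; when the ring fills up, a further array is
 * allocated and spliced in. The array pointers are remembered in
 * hctx->seqbuf[] so that ccid2_hc_tx_exit() can free them again.
 */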
static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hctx)
{
	struct ccid2_seq *seqp;
	int i;

	/* check if we have space to preserve the pointer to the buffer */
	if (hctx->seqbufc >= sizeof(hctx->seqbuf) / sizeof(struct ccid2_seq *))
		return -ENOMEM;

	/* allocate buffer and initialize linked list */
	seqp = kmalloc(CCID2_SEQBUF_LEN * sizeof(struct ccid2_seq), gfp_any());
	if (seqp == NULL)
		return -ENOMEM;

	for (i = 0; i < (CCID2_SEQBUF_LEN - 1); i++) {
		seqp[i].ccid2s_next = &seqp[i + 1];
		seqp[i + 1].ccid2s_prev = &seqp[i];
	}
	seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = seqp;
	seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];

	/* If this is the first allocation, initialise the head and tail. */
	if (hctx->seqbufc == 0)
		hctx->seqh = hctx->seqt = seqp;
	else {
		/* link the existing list with the one we just created */
		hctx->seqh->ccid2s_next = seqp;
		seqp->ccid2s_prev = hctx->seqh;

		hctx->seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
		seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hctx->seqt;
	}

	/* store the original pointer to the buffer so we can free it */
	hctx->seqbuf[hctx->seqbufc] = seqp;
	hctx->seqbufc++;

	return 0;
}

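/*
 * Congestion-window check for the dequeueing decision: while the sender is
 * network-limited (ccid2_cwnd_network_limited(), roughly pipe >= cwnd) the
 * packet stays queued and the TX tasklet will retry; otherwise send at once.
 */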
static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
{
	if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
		return CCID_PACKET_WILL_DEQUEUE_LATER;
	return CCID_PACKET_SEND_AT_ONCE;
}

static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
{
	struct dccp_sock *dp = dccp_sk(sk);
	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->cwnd, 2);

	/*
	 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
	 * RFC 4341, 6.1.2. We ignore the statement that Ack Ratio 2 is always
	 * acceptable since this causes starvation/deadlock whenever cwnd < 2.
	 * The same problem arises when Ack Ratio is 0 (ie. Ack Ratio disabled).
	 */
	if (val == 0 || val > max_ratio) {
		DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
		val = max_ratio;
	}
	if (val > DCCPF_ACK_RATIO_MAX)
		val = DCCPF_ACK_RATIO_MAX;

	if (val == dp->dccps_l_ack_ratio)
		return;

	ccid2_pr_debug("changing local ack ratio to %u\n", val);
	dp->dccps_l_ack_ratio = val;
}

static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hctx, long val)
{
	ccid2_pr_debug("change SRTT to %ld\n", val);
	hctx->srtt = val;
}

static void ccid2_start_rto_timer(struct sock *sk);

static void ccid2_hc_tx_rto_expire(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
	const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx);
	long s;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		sk_reset_timer(sk, &hctx->rtotimer, jiffies + HZ / 5);
		goto out;
	}

	ccid2_pr_debug("RTO_EXPIRE\n");

	/* back-off timer */
	hctx->rto <<= 1;

	s = hctx->rto / HZ;
	if (s > 60)
		hctx->rto = 60 * HZ;

	/* adjust pipe, cwnd etc */
	hctx->ssthresh = hctx->cwnd / 2;
	if (hctx->ssthresh < 2)
		hctx->ssthresh = 2;
	hctx->cwnd = 1;
	hctx->pipe = 0;

	/* clear state about stuff we sent */
	hctx->seqt = hctx->seqh;
	hctx->packets_acked = 0;

	/* clear ack ratio state. */
	hctx->rpseq    = 0;
	hctx->rpdupack = -1;
	ccid2_change_l_ack_ratio(sk, 1);

	/* if we were blocked before, we may now send cwnd=1 packet */
	if (sender_was_blocked)
		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
	ccid2_start_rto_timer(sk);
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void ccid2_start_rto_timer(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);

	ccid2_pr_debug("setting RTO timeout=%ld\n", hctx->rto);

	BUG_ON(timer_pending(&hctx->rtotimer));
	sk_reset_timer(sk, &hctx->rtotimer,
		       jiffies + hctx->rto);
}

static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
	struct ccid2_seq *next;

	hctx->pipe++;

	hctx->seqh->ccid2s_seq   = dp->dccps_gss;
	hctx->seqh->ccid2s_acked = 0;
	hctx->seqh->ccid2s_sent  = jiffies;

	next = hctx->seqh->ccid2s_next;
	/* check if we need to alloc more space */
	if (next == hctx->seqt) {
		if (ccid2_hc_tx_alloc_seq(hctx)) {
			DCCP_CRIT("packet history - out of memory!");
			/* FIXME: find a more graceful way to bail out */
			return;
		}
		next = hctx->seqh->ccid2s_next;
		BUG_ON(next == hctx->seqt);
	}
	hctx->seqh = next;

	ccid2_pr_debug("cwnd=%d pipe=%d\n", hctx->cwnd, hctx->pipe);

	/*
	 * FIXME: The code below is broken and the variables have been removed
	 * from the socket struct. The `ackloss' variable was always set to 0,
	 * and with arsent there are several problems:
	 *  (i) it doesn't just count the number of Acks, but all sent packets;
	 *  (ii) it is expressed in # of packets, not # of windows, so the
	 *  comparison below uses the wrong formula: Appendix A of RFC 4341
	 *  comes up with the number K = cwnd / (R^2 - R) of consecutive windows
	 *  of data with no lost or marked Ack packets. If arsent were the # of
	 *  consecutive Acks received without loss, then Ack Ratio needs to be
	 *  decreased by 1 when
	 *	      arsent >=  K * cwnd / R  =  cwnd^2 / (R^3 - R^2)
	 *  where cwnd / R is the number of Acks received per window of data
	 *  (cf. RFC 4341, App. A). The problems are that
	 *  - arsent counts other packets as well;
	 *  - the comparison uses a formula different from RFC 4341;
	 *  - computing a cubic/quadratic equation each time is too complicated.
	 *  Hence a different algorithm is needed.
	 */
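	/*
	 * Worked example of the Appendix A bound (illustration only): with
	 * cwnd = 20 and Ack Ratio R = 2, K = cwnd / (R^2 - R) = 10 windows,
	 * i.e. Ack Ratio could only be reduced by one after
	 * K * cwnd / R = cwnd^2 / (R^3 - R^2) = 400 / 4 = 100 consecutive
	 * Acks have arrived without loss or marking.
	 */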
#if 0
	/* Ack Ratio.  Need to maintain a concept of how many windows we sent */
	hctx->arsent++;
	/* We had an ack loss in this window... */
	if (hctx->ackloss) {
		if (hctx->arsent >= hctx->cwnd) {
			hctx->arsent  = 0;
			hctx->ackloss = 0;
		}
	} else {
		/* No acks lost up to now... */
		/* decrease ack ratio if enough packets were sent */
		if (dp->dccps_l_ack_ratio > 1) {
			/* XXX don't calculate denominator each time */
			int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio -
				    dp->dccps_l_ack_ratio;

			denom = hctx->cwnd * hctx->cwnd / denom;

			if (hctx->arsent >= denom) {
				ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1);
				hctx->arsent = 0;
			}
		} else {
			/* we can't increase ack ratio further [1] */
			hctx->arsent = 0; /* or maybe set it to cwnd*/
		}
	}
#endif

	/* setup RTO timer */
	if (!timer_pending(&hctx->rtotimer))
		ccid2_start_rto_timer(sk);

#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
	do {
		struct ccid2_seq *seqp = hctx->seqt;

		while (seqp != hctx->seqh) {
			ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
				       (unsigned long long)seqp->ccid2s_seq,
				       seqp->ccid2s_acked, seqp->ccid2s_sent);
			seqp = seqp->ccid2s_next;
		}
	} while (0);
	ccid2_pr_debug("=========\n");
#endif
}

static void ccid2_hc_tx_kill_rto_timer(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);

	sk_stop_timer(sk, &hctx->rtotimer);
	ccid2_pr_debug("deleted RTO timer\n");
}

static inline void ccid2_new_ack(struct sock *sk,
				 struct ccid2_seq *seqp,
				 unsigned int *maxincr)
{
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);

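	/*
	 * Window growth (cf. RFC 4341, sec. 5): in slow start, cwnd grows by
	 * one for every second newly acked packet, bounded by *maxincr per
	 * incoming feedback packet; in congestion avoidance, cwnd grows by
	 * one per window's worth (cwnd) of newly acked packets.
	 */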
	if (hctx->cwnd < hctx->ssthresh) {
		if (*maxincr > 0 && ++hctx->packets_acked == 2) {
			hctx->cwnd += 1;
			*maxincr   -= 1;
			hctx->packets_acked = 0;
		}
	} else if (++hctx->packets_acked >= hctx->cwnd) {
			hctx->cwnd += 1;
			hctx->packets_acked = 0;
	}

	/* update RTO */
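	/*
	 * The estimator mirrors TCP's (cf. RFC 2988): take at most one sample
	 * per SRTT, update RTTVAR with gain 1/4 and SRTT with gain 1/8, then
	 * set RTO = SRTT + max(4 * RTTVAR, 1 jiffy), clamped to [1s, 60s].
	 */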
	if (hctx->srtt == -1 ||
	    time_after(jiffies, hctx->lastrtt + hctx->srtt)) {
		unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent;
		int s;

		/* first measurement */
		if (hctx->srtt == -1) {
			ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
				       r, jiffies,
				       (unsigned long long)seqp->ccid2s_seq);
			ccid2_change_srtt(hctx, r);
			hctx->rttvar = r >> 1;
		} else {
			/* RTTVAR */
			long tmp = hctx->srtt - r;
			long srtt;

			if (tmp < 0)
				tmp *= -1;

			tmp >>= 2;
			hctx->rttvar *= 3;
			hctx->rttvar >>= 2;
			hctx->rttvar += tmp;

			/* SRTT */
			srtt = hctx->srtt;
			srtt *= 7;
			srtt >>= 3;
			tmp = r >> 3;
			srtt += tmp;
			ccid2_change_srtt(hctx, srtt);
		}
		s = hctx->rttvar << 2;
		/* clock granularity is 1 when based on jiffies */
		if (!s)
			s = 1;
		hctx->rto = hctx->srtt + s;

		/* must be at least a second */
		s = hctx->rto / HZ;
		/* DCCP does not require this, but keep 1s as a safety margin */
#if 1
		if (s < 1)
			hctx->rto = HZ;
#endif
		/* max 60 seconds */
		if (s > 60)
			hctx->rto = HZ * 60;

		hctx->lastrtt = jiffies;

		ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
			       hctx->srtt, hctx->rttvar,
			       hctx->rto, HZ, r);
	}
}

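/*
 * Halve the congestion window in reaction to loss or ECN marking. Packets
 * sent before the previous reaction (->last_cong) are folded into that event,
 * so cwnd is reduced at most once per round-trip time. cwnd never drops
 * below 1 and ssthresh never below 2.
 */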
static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
{
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);

	if (time_before(seqp->ccid2s_sent, hctx->last_cong)) {
		ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
		return;
	}

	hctx->last_cong = jiffies;

	hctx->cwnd     = hctx->cwnd / 2 ? : 1U;
	hctx->ssthresh = max(hctx->cwnd, 2U);

	/* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
	if (dccp_sk(sk)->dccps_l_ack_ratio > hctx->cwnd)
		ccid2_change_l_ack_ratio(sk, hctx->cwnd);
}

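/*
 * Queue up any Ack Vector options carried by the packet; they are processed
 * later in ccid2_hc_tx_packet_recv().
 */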
static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
				     u8 option, u8 *optval, u8 optlen)
{
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);

	switch (option) {
	case DCCPO_ACK_VECTOR_0:
	case DCCPO_ACK_VECTOR_1:
		return dccp_ackvec_parsed_add(&hctx->av_chunks, optval, optlen,
					      option - DCCPO_ACK_VECTOR_0);
	}
	return 0;
}

static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
	const bool sender_was_blocked = ccid2_cwnd_network_limited(hctx);
	struct dccp_ackvec_parsed *avp;
	u64 ackno, seqno;
	struct ccid2_seq *seqp;
	int done = 0;
	unsigned int maxincr = 0;

	/* check reverse path congestion */
	seqno = DCCP_SKB_CB(skb)->dccpd_seq;

	/* XXX this whole "algorithm" is broken.  Need to fix it to keep track
	 * of the seqnos of the dupacks so that rpseq and rpdupack are correct
	 * -sorbo.
	 */
	/* need to bootstrap */
	if (hctx->rpdupack == -1) {
		hctx->rpdupack = 0;
		hctx->rpseq = seqno;
	} else {
		/* check if packet is consecutive */
		if (dccp_delta_seqno(hctx->rpseq, seqno) == 1)
			hctx->rpseq = seqno;
		/* it's a later packet */
		else if (after48(seqno, hctx->rpseq)) {
			hctx->rpdupack++;

			/* check if we got enough dupacks */
			if (hctx->rpdupack >= NUMDUPACK) {
				hctx->rpdupack = -1; /* XXX lame */
				hctx->rpseq = 0;

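				/*
				 * Reverse path looks congested: double the
				 * local Ack Ratio to halve the Ack rate (the
				 * new value is capped by
				 * ccid2_change_l_ack_ratio()).
				 */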
				ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
			}
		}
	}

	/* check forward path congestion */
	if (dccp_packet_without_ack(skb))
		return;

	/* still didn't send out new data packets */
	if (hctx->seqh == hctx->seqt)
		goto done;

	ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
	if (after48(ackno, hctx->high_ack))
		hctx->high_ack = ackno;

	seqp = hctx->seqt;
	while (before48(seqp->ccid2s_seq, ackno)) {
		seqp = seqp->ccid2s_next;
		if (seqp == hctx->seqh) {
			seqp = hctx->seqh->ccid2s_prev;
			break;
		}
	}

	/*
	 * In slow-start, cwnd can increase up to a maximum of Ack Ratio/2
	 * packets per acknowledgement. Rounding up ensures that cwnd still
	 * advances when Ack Ratio is 1, and gives a slight edge otherwise.
	 */
	if (hctx->cwnd < hctx->ssthresh)
		maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);

	/* go through all ack vectors */
	list_for_each_entry(avp, &hctx->av_chunks, node) {
		/* go through this ack vector */
		for (; avp->len--; avp->vec++) {
			u64 ackno_end_rl = SUB48(ackno,
						 dccp_ackvec_runlen(avp->vec));

			ccid2_pr_debug("ackvec %llu |%u,%u|\n",
				       (unsigned long long)ackno,
				       dccp_ackvec_state(avp->vec) >> 6,
				       dccp_ackvec_runlen(avp->vec));
			/* if the seqno we are analyzing is larger than the
			 * current ackno, then move towards the tail of our
			 * seqnos.
			 */
			while (after48(seqp->ccid2s_seq, ackno)) {
				if (seqp == hctx->seqt) {
					done = 1;
					break;
				}
				seqp = seqp->ccid2s_prev;
			}
			if (done)
				break;

			/* check all seqnos in the range of the vector
			 * run length
			 */
			while (between48(seqp->ccid2s_seq, ackno_end_rl, ackno)) {
				const u8 state = dccp_ackvec_state(avp->vec);

				/* new packet received or marked */
				if (state != DCCPAV_NOT_RECEIVED &&
				    !seqp->ccid2s_acked) {
					if (state == DCCPAV_ECN_MARKED)
						ccid2_congestion_event(sk,
								       seqp);
					else
						ccid2_new_ack(sk, seqp,
							      &maxincr);

					seqp->ccid2s_acked = 1;
					ccid2_pr_debug("Got ack for %llu\n",
						       (unsigned long long)seqp->ccid2s_seq);
					hctx->pipe--;
				}
				if (seqp == hctx->seqt) {
					done = 1;
					break;
				}
				seqp = seqp->ccid2s_prev;
			}
			if (done)
				break;

			ackno = SUB48(ackno_end_rl, 1);
		}
		if (done)
			break;
	}

	/* The state about what is acked should be correct now
	 * Check for NUMDUPACK
	 */
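	/*
	 * Duplicate-Ack style loss detection (cf. TCP fast retransmit): walk
	 * back from the highest Ack seen; once NUMDUPACK (3) acked packets
	 * have been counted, every older packet that is still unacknowledged
	 * is treated as lost.
	 */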
	seqp = hctx->seqt;
	while (before48(seqp->ccid2s_seq, hctx->high_ack)) {
		seqp = seqp->ccid2s_next;
		if (seqp == hctx->seqh) {
			seqp = hctx->seqh->ccid2s_prev;
			break;
		}
	}
	done = 0;
	while (1) {
		if (seqp->ccid2s_acked) {
			done++;
			if (done == NUMDUPACK)
				break;
		}
		if (seqp == hctx->seqt)
			break;
		seqp = seqp->ccid2s_prev;
	}

	/* If there are at least 3 acknowledgements, anything unacknowledged
	 * below the last sequence number is considered lost
	 */
	if (done == NUMDUPACK) {
		struct ccid2_seq *last_acked = seqp;

		/* check for lost packets */
		while (1) {
			if (!seqp->ccid2s_acked) {
				ccid2_pr_debug("Packet lost: %llu\n",
					       (unsigned long long)seqp->ccid2s_seq);
				/* XXX need to traverse from tail -> head in
				 * order to detect multiple congestion events in
				 * one ack vector.
				 */
				ccid2_congestion_event(sk, seqp);
				hctx->pipe--;
			}
			if (seqp == hctx->seqt)
				break;
			seqp = seqp->ccid2s_prev;
		}

		hctx->seqt = last_acked;
	}

	/* trim acked packets in tail */
	while (hctx->seqt != hctx->seqh) {
		if (!hctx->seqt->ccid2s_acked)
			break;

		hctx->seqt = hctx->seqt->ccid2s_next;
	}

	/* restart RTO timer if not all outstanding data has been acked */
	if (hctx->pipe == 0)
		sk_stop_timer(sk, &hctx->rtotimer);
	else
		sk_reset_timer(sk, &hctx->rtotimer,
			       jiffies + hctx->rto);
done:
	/* check if incoming Acks allow pending packets to be sent */
	if (sender_was_blocked && !ccid2_cwnd_network_limited(hctx))
		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
	dccp_ackvec_parsed_cleanup(&hctx->av_chunks);
}

static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
	struct ccid2_hc_tx_sock *hctx = ccid_priv(ccid);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 max_ratio;

	/* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
	hctx->ssthresh = ~0U;

	/*
	 * RFC 4341, 5: "The cwnd parameter is initialized to at most four
	 * packets for new connections, following the rules from [RFC3390]".
	 * We need to convert the bytes of RFC3390 into the packets of RFC 4341.
	 */
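	/* For example, an MSS of 1460 gives 4380 / 1460 = 3 packets; very
	 * large MSS values are clamped to 2 and very small ones to 4 packets.
	 */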
	hctx->cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U);

	/* Make sure that Ack Ratio is enabled and within bounds. */
	max_ratio = DIV_ROUND_UP(hctx->cwnd, 2);
	if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio)
		dp->dccps_l_ack_ratio = max_ratio;

	/* XXX init ~ to window size... */
	if (ccid2_hc_tx_alloc_seq(hctx))
		return -ENOMEM;

	hctx->rto	 = 3 * HZ;
	ccid2_change_srtt(hctx, -1);
	hctx->rttvar	= -1;
	hctx->rpdupack  = -1;
	hctx->last_cong = jiffies;
	setup_timer(&hctx->rtotimer, ccid2_hc_tx_rto_expire, (unsigned long)sk);
	INIT_LIST_HEAD(&hctx->av_chunks);
	return 0;
}

static void ccid2_hc_tx_exit(struct sock *sk)
{
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
	int i;

	ccid2_hc_tx_kill_rto_timer(sk);

	for (i = 0; i < hctx->seqbufc; i++)
		kfree(hctx->seqbuf[i]);
	hctx->seqbufc = 0;
}

static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_rx_sock *hcrx = ccid2_hc_rx_sk(sk);

	switch (DCCP_SKB_CB(skb)->dccpd_type) {
	case DCCP_PKT_DATA:
	case DCCP_PKT_DATAACK:
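		/*
		 * Receiver side is plain delayed acking: acknowledge every
		 * Ack Ratio (dccps_r_ack_ratio) data packets.
		 */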
		hcrx->data++;
		if (hcrx->data >= dp->dccps_r_ack_ratio) {
			dccp_send_ack(sk);
			hcrx->data = 0;
		}
		break;
	}
}

static struct ccid_operations ccid2 = {
	.ccid_id		  = DCCPC_CCID2,
	.ccid_name		  = "TCP-like",
	.ccid_owner		  = THIS_MODULE,
	.ccid_hc_tx_obj_size	  = sizeof(struct ccid2_hc_tx_sock),
	.ccid_hc_tx_init	  = ccid2_hc_tx_init,
	.ccid_hc_tx_exit	  = ccid2_hc_tx_exit,
	.ccid_hc_tx_send_packet	  = ccid2_hc_tx_send_packet,
	.ccid_hc_tx_packet_sent	  = ccid2_hc_tx_packet_sent,
	.ccid_hc_tx_parse_options = ccid2_hc_tx_parse_options,
	.ccid_hc_tx_packet_recv	  = ccid2_hc_tx_packet_recv,
	.ccid_hc_rx_obj_size	  = sizeof(struct ccid2_hc_rx_sock),
	.ccid_hc_rx_packet_recv	  = ccid2_hc_rx_packet_recv,
};

#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
module_param(ccid2_debug, bool, 0644);
MODULE_PARM_DESC(ccid2_debug, "Enable debug messages");
#endif

static __init int ccid2_module_init(void)
{
	return ccid_register(&ccid2);
}
module_init(ccid2_module_init);

static __exit void ccid2_module_exit(void)
{
	ccid_unregister(&ccid2);
}
module_exit(ccid2_module_exit);

MODULE_AUTHOR("Andrea Bittau <a.bittau@cs.ucl.ac.uk>");
MODULE_DESCRIPTION("DCCP TCP-Like (CCID2) CCID");
MODULE_LICENSE("GPL");
MODULE_ALIAS("net-dccp-ccid-2");