/*
 *  net/dccp/input.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

/* rate-limit for syncs in reply to sequence-invalid packets; RFC 4340, 7.5.4 */
int sysctl_dccp_sync_ratelimit	__read_mostly = HZ / 8;

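/*
 * Enqueue the skb as the DCCP equivalent of a TCP FIN: mark the socket as
 * shut down for reading and done, queue the packet and wake up any reader,
 * so that dccp_recvmsg() returns.
 */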
static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_shutdown |= RCV_SHUTDOWN;
	sock_set_flag(sk, SOCK_DONE);
	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);
	sk->sk_data_ready(sk, 0);
}

static void dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
{
	dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
	dccp_fin(sk, skb);
	dccp_set_state(sk, DCCP_CLOSED);
	sk_wake_async(sk, 1, POLL_HUP);
}

static void dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
{
	/*
	 *   Step 7: Check for unexpected packet types
	 *      If (S.is_server and P.type == CloseReq)
	 *	  Send Sync packet acknowledging P.seqno
	 *	  Drop packet and return
	 */
	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
		return;
	}

	if (sk->sk_state != DCCP_CLOSING)
		dccp_set_state(sk, DCCP_CLOSING);
	dccp_send_close(sk, 0);
}

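/*
 * Map the Reset Code of a received DCCP-Reset packet (RFC 4340, 5.6) onto
 * the errno value reported to the socket user via sk->sk_err; 0 means no
 * error (normal or unspecified termination).
 */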
static u8 dccp_reset_code_convert(const u8 code)
{
	const u8 error_code[] = {
	[DCCP_RESET_CODE_CLOSED]	     = 0,	/* normal termination */
	[DCCP_RESET_CODE_UNSPECIFIED]	     = 0,	/* nothing known */
	[DCCP_RESET_CODE_ABORTED]	     = ECONNRESET,

	[DCCP_RESET_CODE_NO_CONNECTION]	     = ECONNREFUSED,
	[DCCP_RESET_CODE_CONNECTION_REFUSED] = ECONNREFUSED,
	[DCCP_RESET_CODE_TOO_BUSY]	     = EUSERS,
	[DCCP_RESET_CODE_AGGRESSION_PENALTY] = EDQUOT,

	[DCCP_RESET_CODE_PACKET_ERROR]	     = ENOMSG,
	[DCCP_RESET_CODE_BAD_INIT_COOKIE]    = EBADR,
	[DCCP_RESET_CODE_BAD_SERVICE_CODE]   = EBADRQC,
	[DCCP_RESET_CODE_OPTION_ERROR]	     = EILSEQ,
	[DCCP_RESET_CODE_MANDATORY_ERROR]    = EOPNOTSUPP,
	};

	return code >= DCCP_MAX_RESET_CODES ? 0 : error_code[code];
}

static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
{
	u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code);

	sk->sk_err = err;

	/* Queue the equivalent of a TCP FIN so that dccp_recvmsg() exits its loop */
	dccp_fin(sk, skb);

	if (err && !sock_flag(sk, SOCK_DEAD))
		sk_wake_async(sk, 0, POLL_ERR);
	dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
}

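/*
 * A packet carrying a valid Acknowledgement Number has arrived: if Ack
 * Vectors are in use, hand the peer's ackno to the Ack Vector code so that
 * the Ack Vector state it covers can be cleaned up.
 */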
static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (dccp_msk(sk)->dccpms_send_ack_vector)
		dccp_ackvec_check_rcv_ackno(dp->dccps_hc_rx_ackvec, sk,
					    DCCP_SKB_CB(skb)->dccpd_ack_seq);
}

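/*
 * Sequence-validity checks of RFC 4340, 8.5: Step 5 (prepare sequence
 * numbers for Sync/SyncAck) and Step 6 (sequence-window check). A nonzero
 * return value tells the caller to drop the packet.
 */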
static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	struct dccp_sock *dp = dccp_sk(sk);
	u64 lswl, lawl, seqno = DCCP_SKB_CB(skb)->dccpd_seq,
			ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
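	/*
	 * All comparisons below use 48-bit circular sequence-number
	 * arithmetic (between48/ADD48/dccp_delta_seqno), since DCCP sequence
	 * numbers are 48 bits wide and wrap around (RFC 4340, 7.1).
	 */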

	/*
	 *   Step 5: Prepare sequence numbers for Sync
	 *     If P.type == Sync or P.type == SyncAck,
	 *	  If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL,
	 *	     / * P is valid, so update sequence number variables
	 *		 accordingly.  After this update, P will pass the tests
	 *		 in Step 6.  A SyncAck is generated if necessary in
	 *		 Step 15 * /
	 *	     Update S.GSR, S.SWL, S.SWH
	 *	  Otherwise,
	 *	     Drop packet and return
	 */
	if (dh->dccph_type == DCCP_PKT_SYNC ||
	    dh->dccph_type == DCCP_PKT_SYNCACK) {
		if (between48(ackno, dp->dccps_awl, dp->dccps_awh) &&
		    dccp_delta_seqno(dp->dccps_swl, seqno) >= 0)
			dccp_update_gsr(sk, seqno);
		else
			return -1;
	}

	/*
	 *   Step 6: Check sequence numbers
	 *      Let LSWL = S.SWL and LAWL = S.AWL
	 *      If P.type == CloseReq or P.type == Close or P.type == Reset,
	 *	  LSWL := S.GSR + 1, LAWL := S.GAR
	 *      If LSWL <= P.seqno <= S.SWH
	 *	     and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH),
	 *	  Update S.GSR, S.SWL, S.SWH
	 *	  If P.type != Sync,
	 *	     Update S.GAR
	 */
	lswl = dp->dccps_swl;
	lawl = dp->dccps_awl;

	if (dh->dccph_type == DCCP_PKT_CLOSEREQ ||
	    dh->dccph_type == DCCP_PKT_CLOSE ||
	    dh->dccph_type == DCCP_PKT_RESET) {
		lswl = ADD48(dp->dccps_gsr, 1);
		lawl = dp->dccps_gar;
	}

	if (between48(seqno, lswl, dp->dccps_swh) &&
	    (ackno == DCCP_PKT_WITHOUT_ACK_SEQ ||
	     between48(ackno, lawl, dp->dccps_awh))) {
		dccp_update_gsr(sk, seqno);

		if (dh->dccph_type != DCCP_PKT_SYNC &&
		    (ackno != DCCP_PKT_WITHOUT_ACK_SEQ))
			dp->dccps_gar = ackno;
	} else {
		unsigned long now = jiffies;
		/*
		 *   Step 6: Check sequence numbers
		 *      Otherwise,
		 *         If P.type == Reset,
		 *            Send Sync packet acknowledging S.GSR
		 *         Otherwise,
		 *            Send Sync packet acknowledging P.seqno
		 *      Drop packet and return
		 *
		 *   These Syncs are rate-limited as per RFC 4340, 7.5.4: at
		 *   most one Sync per sysctl_dccp_sync_ratelimit jiffies
		 *   (with the default of HZ / 8, at most 8 Syncs per second).
		 */
		if (time_before(now, (dp->dccps_rate_last +
				      sysctl_dccp_sync_ratelimit)))
			return 0;

		DCCP_WARN("DCCP: Step 6 failed for %s packet, "
			  "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
			  "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), "
			  "sending SYNC...\n",  dccp_packet_name(dh->dccph_type),
			  (unsigned long long) lswl, (unsigned long long) seqno,
			  (unsigned long long) dp->dccps_swh,
			  (ackno == DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist"
							      : "exists",
			  (unsigned long long) lawl, (unsigned long long) ackno,
			  (unsigned long long) dp->dccps_awh);

		dp->dccps_rate_last = now;

		if (dh->dccph_type == DCCP_PKT_RESET)
			seqno = dp->dccps_gsr;
		dccp_send_sync(sk, seqno, DCCP_PKT_SYNC);
		return -1;
	}

	return 0;
}

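/*
 * Deliver a packet whose sequence numbers and options have already been
 * validated: Data/DataAck payloads are queued for the reader, while Reset,
 * Close, CloseReq, Sync and unexpected Request/Response packets are handled
 * per the corresponding steps of RFC 4340, 8.5.
 */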
static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
				  const struct dccp_hdr *dh, const unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	switch (dccp_hdr(skb)->dccph_type) {
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_DATA:
		/*
		 * FIXME: check if sk_receive_queue is full, schedule DATA_DROPPED
		 * option if it is.
		 */
		__skb_pull(skb, dh->dccph_doff * 4);
		__skb_queue_tail(&sk->sk_receive_queue, skb);
		skb_set_owner_r(skb, sk);
		sk->sk_data_ready(sk, 0);
		return 0;
	case DCCP_PKT_ACK:
		goto discard;
	case DCCP_PKT_RESET:
		/*
		 *  Step 9: Process Reset
		 *	If P.type == Reset,
		 *		Tear down connection
		 *		S.state := TIMEWAIT
		 *		Set TIMEWAIT timer
		 *		Drop packet and return
		 */
		dccp_rcv_reset(sk, skb);
		return 0;
	case DCCP_PKT_CLOSEREQ:
		dccp_rcv_closereq(sk, skb);
		goto discard;
	case DCCP_PKT_CLOSE:
		dccp_rcv_close(sk, skb);
		return 0;
	case DCCP_PKT_REQUEST:
		/* Step 7
		 *   or (S.is_server and P.type == Response)
		 *   or (S.is_client and P.type == Request)
		 *   or (S.state >= OPEN and P.type == Request
		 *	and P.seqno >= S.OSR)
		 *    or (S.state >= OPEN and P.type == Response
		 *	and P.seqno >= S.OSR)
		 *    or (S.state == RESPOND and P.type == Data),
		 *  Send Sync packet acknowledging P.seqno
		 *  Drop packet and return
		 */
		if (dp->dccps_role != DCCP_ROLE_LISTEN)
			goto send_sync;
		goto check_seq;
	case DCCP_PKT_RESPONSE:
		if (dp->dccps_role != DCCP_ROLE_CLIENT)
			goto send_sync;
check_seq:
		if (dccp_delta_seqno(dp->dccps_osr,
				     DCCP_SKB_CB(skb)->dccpd_seq) >= 0) {
send_sync:
			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
				       DCCP_PKT_SYNC);
		}
		break;
	case DCCP_PKT_SYNC:
		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
			       DCCP_PKT_SYNCACK);
		/*
		 * From RFC 4340, sec. 5.7
		 *
		 * As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets
		 * MAY have non-zero-length application data areas, whose
		 * contents receivers MUST ignore.
		 */
		goto discard;
	}

	DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
discard:
	__kfree_skb(skb);
	return 0;
}

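/*
 * Receive path for sockets in the OPEN state: check sequence numbers, parse
 * options, process any Acknowledgement Number and Ack Vector state, let both
 * the RX and TX CCIDs see the packet, then hand it to
 * __dccp_rcv_established().
 */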
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct dccp_hdr *dh, const unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);

	if (dccp_check_seqno(sk, skb))
		goto discard;

	if (dccp_parse_options(sk, skb))
		goto discard;

	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_event_ack_recv(sk, skb);

	if (dccp_msk(sk)->dccpms_send_ack_vector &&
	    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
			    DCCP_SKB_CB(skb)->dccpd_seq,
			    DCCP_ACKVEC_STATE_RECEIVED))
		goto discard;

	ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
	ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);

	return __dccp_rcv_established(sk, skb, dh, len);
discard:
	__kfree_skb(skb);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_rcv_established);

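/*
 * Client-side handling of a packet received in the REQUEST state (RFC 4340,
 * 8.5, Steps 4 and 10): a valid Response moves the socket to PARTOPEN and
 * triggers the Ack that completes the handshake. Returns 1 if the packet is
 * invalid (the caller will send a Reset), 0 if the skb was consumed here,
 * and -1 if the caller should free the skb.
 */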
static int dccp_rcv_request_sent_state_process(struct sock *sk,
					       struct sk_buff *skb,
					       const struct dccp_hdr *dh,
					       const unsigned len)
{
	/*
	 *  Step 4: Prepare sequence numbers in REQUEST
	 *     If S.state == REQUEST,
	 *	  If (P.type == Response or P.type == Reset)
	 *		and S.AWL <= P.ackno <= S.AWH,
	 *	     / * Set sequence number variables corresponding to the
	 *		other endpoint, so P will pass the tests in Step 6 * /
	 *	     Set S.GSR, S.ISR, S.SWL, S.SWH
	 *	     / * Response processing continues in Step 10; Reset
	 *		processing continues in Step 9 * /
	*/
	if (dh->dccph_type == DCCP_PKT_RESPONSE) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		long tstamp = dccp_timestamp();

		/* Stop the REQUEST timer */
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
		BUG_TRAP(sk->sk_send_head != NULL);
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;

		if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
			       dp->dccps_awl, dp->dccps_awh)) {
			dccp_pr_debug("invalid ackno: S.AWL=%llu, "
				      "P.ackno=%llu, S.AWH=%llu \n",
				      (unsigned long long)dp->dccps_awl,
			   (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
				      (unsigned long long)dp->dccps_awh);
			goto out_invalid_packet;
		}

		if (dccp_parse_options(sk, skb))
			goto out_invalid_packet;

		/* Obtain usec RTT sample from SYN exchange (used by CCID 3) */
		if (likely(dp->dccps_options_received.dccpor_timestamp_echo))
			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp -
			    dp->dccps_options_received.dccpor_timestamp_echo));
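		/*
		 * dccp_timestamp() and the Timestamp Echo option use units of
		 * 10 microseconds (cf. RFC 4340, 13.1/13.3), hence the factor
		 * of 10 above to pass microseconds to dccp_sample_rtt().
		 */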

		if (dccp_msk(sk)->dccpms_send_ack_vector &&
		    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
				    DCCP_SKB_CB(skb)->dccpd_seq,
				    DCCP_ACKVEC_STATE_RECEIVED))
			goto out_invalid_packet; /* FIXME: change error code */

		dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_update_gsr(sk, dp->dccps_isr);
		/*
		 * SWL and AWL are initially adjusted so that they are not less than
		 * the initial Sequence Numbers received and sent, respectively:
		 *	SWL := max(GSR + 1 - floor(W/4), ISR),
		 *	AWL := max(GSS - W' + 1, ISS).
		 * These adjustments MUST be applied only at the beginning of the
		 * connection.
		 *
		 * AWL was adjusted in dccp_v4_connect -acme
		 */
		dccp_set_seqno(&dp->dccps_swl,
			       max48(dp->dccps_swl, dp->dccps_isr));

		dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);

		/*
		 *    Step 10: Process REQUEST state (second part)
		 *       If S.state == REQUEST,
		 *	  / * If we get here, P is a valid Response from the
		 *	      server (see Step 4), and we should move to
		 *	      PARTOPEN state. PARTOPEN means send an Ack,
		 *	      don't send Data packets, retransmit Acks
		 *	      periodically, and always include any Init Cookie
		 *	      from the Response * /
		 *	  S.state := PARTOPEN
		 *	  Set PARTOPEN timer
		 *	  Continue with S.state == PARTOPEN
		 *	  / * Step 12 will send the Ack completing the
		 *	      three-way handshake * /
		 */
		dccp_set_state(sk, DCCP_PARTOPEN);

		/* Make sure socket is routed, for correct metrics. */
		icsk->icsk_af_ops->rebuild_header(sk);

		if (!sock_flag(sk, SOCK_DEAD)) {
			sk->sk_state_change(sk);
			sk_wake_async(sk, 0, POLL_OUT);
		}

		if (sk->sk_write_pending || icsk->icsk_ack.pingpong ||
		    icsk->icsk_accept_queue.rskq_defer_accept) {
			/* Save one ACK. Data will be ready after
			 * several ticks, if write_pending is set.
			 *
			 * It may be deleted, but with this feature tcpdumps
			 * look so _wonderfully_ clever, that I was not able
			 * to stand against the temptation 8)     --ANK
			 */
			/*
			 * OK, in DCCP we can as well do a similar trick, it's
			 * even in the draft, but there is no need for us to
			 * schedule an ack here, as dccp_sendmsg does this for
			 * us, also stated in the draft. -acme
			 */
			__kfree_skb(skb);
			return 0;
		}
		dccp_send_ack(sk);
		return -1;
	}

out_invalid_packet:
	/* dccp_v4_do_rcv will send a reset */
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;
	return 1;
}

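/*
 * Handle packets arriving in the RESPOND (server) or PARTOPEN (client)
 * states: the first valid Ack, DataAck or (in PARTOPEN) Data packet
 * completes the handshake and moves the socket to OPEN. Returns 1 if the
 * skb was queued by __dccp_rcv_established(), 0 otherwise.
 */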
static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
						   struct sk_buff *skb,
						   const struct dccp_hdr *dh,
						   const unsigned len)
{
	int queued = 0;

	switch (dh->dccph_type) {
	case DCCP_PKT_RESET:
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		break;
	case DCCP_PKT_DATA:
		if (sk->sk_state == DCCP_RESPOND)
			break;
	case DCCP_PKT_DATAACK:
	case DCCP_PKT_ACK:
		/*
		 * FIXME: we should be resetting the PARTOPEN (DELACK) timer
		 * here, but only if we haven't used the DELACK timer for
		 * something else, like sending a delayed ack for a TIMESTAMP
		 * echo, etc. For now we're not clearing it; sending an extra
		 * ACK when there is nothing else to do in DELACK is not a big
		 * deal after all.
		 */

		/* Stop the PARTOPEN timer */
		if (sk->sk_state == DCCP_PARTOPEN)
			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);

		dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
		dccp_set_state(sk, DCCP_OPEN);

		if (dh->dccph_type == DCCP_PKT_DATAACK ||
		    dh->dccph_type == DCCP_PKT_DATA) {
			__dccp_rcv_established(sk, skb, dh, len);
			queued = 1; /* packet was queued
				       (by __dccp_rcv_established) */
		}
		break;
	}

	return queued;
}

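/*
 * State-machine entry point for all socket states other than OPEN: covers
 * Step 3 (LISTEN) and Steps 7-9 of RFC 4340, 8.5, and dispatches to the
 * per-state helpers above. A nonzero return value tells the caller to send
 * a Reset.
 */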
int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			   struct dccp_hdr *dh, unsigned len)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
	const int old_state = sk->sk_state;
	int queued = 0;

	/*
	 *  Step 3: Process LISTEN state
	 *
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init
	 *	      Cookies Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		if (dh->dccph_type == DCCP_PKT_REQUEST) {
			if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
								    skb) < 0)
				return 1;

			/* FIXME: do congestion control initialization */
			goto discard;
		}
		if (dh->dccph_type == DCCP_PKT_RESET)
			goto discard;

		/* Caller (dccp_v4_do_rcv) will send Reset */
		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
		return 1;
	}

	if (sk->sk_state != DCCP_REQUESTING) {
		if (dccp_check_seqno(sk, skb))
			goto discard;

		/*
		 * Step 8: Process options and mark acknowledgeable
		 */
		if (dccp_parse_options(sk, skb))
			goto discard;

		if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
			dccp_event_ack_recv(sk, skb);

		if (dccp_msk(sk)->dccpms_send_ack_vector &&
		    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
				    DCCP_SKB_CB(skb)->dccpd_seq,
				    DCCP_ACKVEC_STATE_RECEIVED))
			goto discard;

		ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
		ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
	}

	/*
	 *  Step 9: Process Reset
	 *	If P.type == Reset,
	 *		Tear down connection
	 *		S.state := TIMEWAIT
	 *		Set TIMEWAIT timer
	 *		Drop packet and return
	*/
	if (dh->dccph_type == DCCP_PKT_RESET) {
		dccp_rcv_reset(sk, skb);
		return 0;
		/*
		 *   Step 7: Check for unexpected packet types
		 *      If (S.is_server and P.type == CloseReq)
		 *	    or (S.is_server and P.type == Response)
		 *	    or (S.is_client and P.type == Request)
		 *	    or (S.state == RESPOND and P.type == Data),
		 *	  Send Sync packet acknowledging P.seqno
		 *	  Drop packet and return
		 */
	} else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
		    (dh->dccph_type == DCCP_PKT_RESPONSE ||
		     dh->dccph_type == DCCP_PKT_CLOSEREQ)) ||
		    (dp->dccps_role == DCCP_ROLE_CLIENT &&
		     dh->dccph_type == DCCP_PKT_REQUEST) ||
		    (sk->sk_state == DCCP_RESPOND &&
		     dh->dccph_type == DCCP_PKT_DATA)) {
		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
		goto discard;
	} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
		dccp_rcv_closereq(sk, skb);
		goto discard;
	} else if (dh->dccph_type == DCCP_PKT_CLOSE) {
		dccp_rcv_close(sk, skb);
		return 0;
	}

	switch (sk->sk_state) {
	case DCCP_CLOSED:
		dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
		return 1;

	case DCCP_REQUESTING:
		/* FIXME: do congestion control initialization */

		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
		if (queued >= 0)
			return queued;

		__kfree_skb(skb);
		return 0;

	case DCCP_RESPOND:
	case DCCP_PARTOPEN:
		queued = dccp_rcv_respond_partopen_state_process(sk, skb,
								 dh, len);
		break;
	}

	if (dh->dccph_type == DCCP_PKT_ACK ||
	    dh->dccph_type == DCCP_PKT_DATAACK) {
		switch (old_state) {
		case DCCP_PARTOPEN:
			sk->sk_state_change(sk);
			sk_wake_async(sk, 0, POLL_OUT);
			break;
		}
	} else if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) {
		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
		goto discard;
	}

	if (!queued) {
discard:
		__kfree_skb(skb);
	}
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_rcv_state_process);

/**
 *  dccp_sample_rtt  -  Validate and finalise computation of RTT sample
 *  @delta:	number of microseconds between packet and acknowledgment
 *  The routine is kept generic to work in different contexts. It should be
 *  called immediately when the ACK used for the RTT sample arrives.
 */
u32 dccp_sample_rtt(struct sock *sk, long delta)
{
	/* dccpor_elapsed_time is either zeroed out or set and > 0 */
	delta -= dccp_sk(sk)->dccps_options_received.dccpor_elapsed_time * 10;

	if (unlikely(delta <= 0)) {
		DCCP_WARN("unusable RTT sample %ld, using min\n", delta);
		return DCCP_SANE_RTT_MIN;
	}
	if (unlikely(delta > DCCP_SANE_RTT_MAX)) {
		DCCP_WARN("RTT sample %ld too large, using max\n", delta);
		return DCCP_SANE_RTT_MAX;
	}

	return delta;
}

EXPORT_SYMBOL_GPL(dccp_sample_rtt);