/* RxRPC packet reception
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/net_namespace.h>
#include "ar-internal.h"

static void rxrpc_proto_abort(const char *why,
			      struct rxrpc_call *call, rxrpc_seq_t seq)
{
	if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG)) {
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
}

/*
 * Do TCP-style congestion management [RFC 5681].
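 *
 * The call moves between four modes (slow start, congestion avoidance,
 * packet loss and fast retransmit), roughly mirroring TCP: slow start
 * grows the window by one packet per newly ACK'd packet, congestion
 * avoidance grows it by at most one packet per RTT, and on seeing NACKs
 * we wait for three duplicate ACKs before halving ssthresh and going
 * into fast retransmission.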
 */
static void rxrpc_congestion_management(struct rxrpc_call *call,
					struct sk_buff *skb,
					struct rxrpc_ack_summary *summary,
					rxrpc_serial_t acked_serial)
{
	enum rxrpc_congest_change change = rxrpc_cong_no_change;
	unsigned int cumulative_acks = call->cong_cumul_acks;
	unsigned int cwnd = call->cong_cwnd;
	bool resend = false;

	summary->flight_size =
		(call->tx_top - call->tx_hard_ack) - summary->nr_acks;

	if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
		summary->retrans_timeo = true;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = 1;
		if (cwnd >= call->cong_ssthresh &&
		    call->cong_mode == RXRPC_CALL_SLOW_START) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
			cumulative_acks = 0;
		}
	}

	cumulative_acks += summary->nr_new_acks;
	cumulative_acks += summary->nr_rot_new_acks;
	if (cumulative_acks > 255)
		cumulative_acks = 255;

	summary->mode = call->cong_mode;
	summary->cwnd = call->cong_cwnd;
	summary->ssthresh = call->cong_ssthresh;
	summary->cumulative_acks = cumulative_acks;
	summary->dup_acks = call->cong_dup_acks;

	switch (call->cong_mode) {
	case RXRPC_CALL_SLOW_START:
		if (summary->nr_nacks > 0)
			goto packet_loss_detected;
		if (summary->cumulative_acks > 0)
			cwnd += 1;
		if (cwnd >= call->cong_ssthresh) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
		}
		goto out;

	case RXRPC_CALL_CONGEST_AVOIDANCE:
		if (summary->nr_nacks > 0)
			goto packet_loss_detected;

		/* We analyse the number of packets that get ACK'd per RTT
		 * period and increase the window if we managed to fill it.
		 */
		if (call->peer->rtt_usage == 0)
			goto out;
		if (ktime_before(skb->tstamp,
				 ktime_add_ns(call->cong_tstamp,
					      call->peer->rtt)))
			goto out_no_clear_ca;
		change = rxrpc_cong_rtt_window_end;
		call->cong_tstamp = skb->tstamp;
		if (cumulative_acks >= cwnd)
			cwnd++;
		goto out;

	case RXRPC_CALL_PACKET_LOSS:
		if (summary->nr_nacks == 0)
			goto resume_normality;

		if (summary->new_low_nack) {
			change = rxrpc_cong_new_low_nack;
			call->cong_dup_acks = 1;
			if (call->cong_extra > 1)
				call->cong_extra = 1;
			goto send_extra_data;
		}

		call->cong_dup_acks++;
		if (call->cong_dup_acks < 3)
			goto send_extra_data;

		change = rxrpc_cong_begin_retransmission;
		call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = call->cong_ssthresh + 3;
		call->cong_extra = 0;
		call->cong_dup_acks = 0;
		resend = true;
		goto out;

	case RXRPC_CALL_FAST_RETRANSMIT:
		if (!summary->new_low_nack) {
			if (summary->nr_new_acks == 0)
				cwnd += 1;
			call->cong_dup_acks++;
			if (call->cong_dup_acks == 2) {
				change = rxrpc_cong_retransmit_again;
				call->cong_dup_acks = 0;
				resend = true;
			}
		} else {
			change = rxrpc_cong_progress;
			cwnd = call->cong_ssthresh;
			if (summary->nr_nacks == 0)
				goto resume_normality;
		}
		goto out;

	default:
		BUG();
		goto out;
	}

resume_normality:
	change = rxrpc_cong_cleared_nacks;
	call->cong_dup_acks = 0;
	call->cong_extra = 0;
	call->cong_tstamp = skb->tstamp;
	if (cwnd < call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_SLOW_START;
	else
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
out:
	cumulative_acks = 0;
out_no_clear_ca:
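	/* The window can never exceed the capacity of the Rx/Tx ring. */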
	if (cwnd >= RXRPC_RXTX_BUFF_SIZE - 1)
		cwnd = RXRPC_RXTX_BUFF_SIZE - 1;
	call->cong_cwnd = cwnd;
	call->cong_cumul_acks = cumulative_acks;
	trace_rxrpc_congest(call, summary, acked_serial, change);
	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);
	return;

packet_loss_detected:
	change = rxrpc_cong_saw_nack;
	call->cong_mode = RXRPC_CALL_PACKET_LOSS;
	call->cong_dup_acks = 0;
	goto send_extra_data;

send_extra_data:
	/* Send some previously unsent DATA if we have some to advance the ACK
	 * state.
	 */
	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
	    RXRPC_TX_ANNO_LAST ||
	    summary->nr_acks != call->tx_top - call->tx_hard_ack) {
		call->cong_extra++;
		wake_up(&call->waitq);
	}
	goto out_no_clear_ca;
}

/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
			    int skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;

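	/* Only ping if we have fewer than three RTT samples or the last
	 * ping was sent more than a second ago.
	 */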
	if (call->peer->rtt_usage < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}

/*
 * Apply a hard ACK by advancing the Tx window.
 */
static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
				   struct rxrpc_ack_summary *summary)
{
	struct sk_buff *skb, *list = NULL;
	int ix;
	u8 annotation;

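	/* Advance the record of the lowest NAK'd packet as the window
	 * rotates; rotating past a recorded NAK flags a new low NAK for the
	 * congestion management code.
	 */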
	if (call->acks_lowest_nak == call->tx_hard_ack) {
		call->acks_lowest_nak = to;
	} else if (before_eq(call->acks_lowest_nak, to)) {
		summary->new_low_nack = true;
		call->acks_lowest_nak = to;
	}

	spin_lock(&call->lock);

	while (before(call->tx_hard_ack, to)) {
		call->tx_hard_ack++;
		ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
		skb = call->rxtx_buffer[ix];
		annotation = call->rxtx_annotations[ix];
		rxrpc_see_skb(skb, rxrpc_skb_tx_rotated);
		call->rxtx_buffer[ix] = NULL;
		call->rxtx_annotations[ix] = 0;
		skb->next = list;
		list = skb;

		if (annotation & RXRPC_TX_ANNO_LAST)
			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
		if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
			summary->nr_rot_new_acks++;
	}

	spin_unlock(&call->lock);

	trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
				    rxrpc_transmit_rotate_last :
				    rxrpc_transmit_rotate));
	wake_up(&call->waitq);

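	/* Release the hard-ACK'd skbs now that the call lock has been
	 * dropped; they were chained together above while the lock was held.
	 */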
	while (list) {
		skb = list;
		list = skb->next;
		skb->next = NULL;
		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
	}
}

/*
 * End the transmission phase of a call.
 *
 * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
 * or a final ACK packet.
 */
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
			       const char *abort_why)
{

	ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));

	write_lock(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		if (reply_begun)
			call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
		else
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
		break;

	case RXRPC_CALL_SERVER_AWAIT_ACK:
		__rxrpc_call_completed(call);
		rxrpc_notify_socket(call);
		break;

	default:
		goto bad_state;
	}

	write_unlock(&call->state_lock);
	if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
		trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
	} else {
		trace_rxrpc_transmit(call, rxrpc_transmit_end);
	}
	_leave(" = ok");
	return true;

bad_state:
	write_unlock(&call->state_lock);
	kdebug("end_tx %s", rxrpc_call_states[call->state]);
	rxrpc_proto_abort(abort_why, call, call->tx_top);
	return false;
}

/*
 * Begin the reply reception phase of a call.
 */
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
	struct rxrpc_ack_summary summary = { 0 };
	unsigned long now, timo;
	rxrpc_seq_t top = READ_ONCE(call->tx_top);

	if (call->ackr_reason) {
		spin_lock_bh(&call->lock);
		call->ackr_reason = 0;
		spin_unlock_bh(&call->lock);
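		/* Push the resend and ACK timers out to the far future now
		 * that our transmit phase is complete.
		 */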
		now = jiffies;
		timo = now + MAX_JIFFY_OFFSET;
		WRITE_ONCE(call->resend_at, timo);
		WRITE_ONCE(call->ack_at, timo);
		trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
	}

	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
		rxrpc_rotate_tx_window(call, top, &summary);
	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		rxrpc_proto_abort("TXL", call, top);
		return false;
	}
	if (!rxrpc_end_tx_phase(call, true, "ETD"))
		return false;
	call->tx_phase = false;
	return true;
}

/*
 * Scan a jumbo packet to validate its structure and to work out how many
 * subpackets it contains.
 *
 * A jumbo packet is a collection of consecutive packets glued together with
 * little headers between that indicate how to change the initial header for
 * each subpacket.
 *
 * RXRPC_JUMBO_PACKET must be set on all but the last subpacket - and all but
 * the last are RXRPC_JUMBO_DATALEN in size.  The last subpacket may be of any
 * size.
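 *
 * On the wire, a jumbo packet is laid out roughly as:
 *
 *	wire header | data | jumbo header | data | jumbo header | data ...
 *
 * where each data section bar the last is RXRPC_JUMBO_DATALEN long and each
 * jumbo header carries the flags for the subpacket that follows it.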
 */
static bool rxrpc_validate_jumbo(struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int len = skb->len;
	int nr_jumbo = 1;
	u8 flags = sp->hdr.flags;

	do {
		nr_jumbo++;
		if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
			goto protocol_error;
		if (flags & RXRPC_LAST_PACKET)
			goto protocol_error;
		offset += RXRPC_JUMBO_DATALEN;
		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
			goto protocol_error;
		offset += sizeof(struct rxrpc_jumbo_header);
	} while (flags & RXRPC_JUMBO_PACKET);

	sp->nr_jumbo = nr_jumbo;
	return true;

protocol_error:
	return false;
}

/*
 * Handle reception of a duplicate packet.
 *
 * We have to take care to avoid an attack here whereby we're given a series of
 * jumbograms, each with a sequence number one before the preceding one and
 * filled up to maximum UDP size.  If they never send us the first packet in
 * the sequence, they can cause us to have to hold on to around 2MiB of kernel
 * space until the call times out.
 *
 * We limit the space usage by only accepting three duplicate jumbo packets per
 * call.  After that, we tell the other side we're no longer accepting jumbos
 * (that information is encoded in the ACK packet).
 */
static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
				 u8 annotation, bool *_jumbo_bad)
{
	/* Discard normal packets that are duplicates. */
	if (annotation == 0)
		return;

	/* Skip jumbo subpackets that are duplicates.  When we've had three or
	 * more partially duplicate jumbo packets, we refuse to take any more
	 * jumbos for this call.
	 */
	if (!*_jumbo_bad) {
		call->nr_jumbo_bad++;
		*_jumbo_bad = true;
	}
}

/*
 * Process a DATA packet, adding the packet to the Rx ring.
 */
static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
			     u16 skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	enum rxrpc_call_state state;
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int ix;
	rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
	rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
	bool immediate_ack = false, jumbo_bad = false, queued;
	u16 len;
	u8 ack = 0, flags, annotation = 0;

	_enter("{%u,%u},{%u,%u}",
	       call->rx_hard_ack, call->rx_top, skb->len, seq);

	_proto("Rx DATA %%%u { #%u f=%02x }",
	       sp->hdr.serial, seq, sp->hdr.flags);

	state = READ_ONCE(call->state);
	if (state >= RXRPC_CALL_COMPLETE)
		return;

	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
		unsigned long timo = READ_ONCE(call->next_req_timo);
		unsigned long now, expect_req_by;

		if (timo) {
			now = jiffies;
			expect_req_by = now + timo;
			WRITE_ONCE(call->expect_req_by, expect_req_by);
			rxrpc_reduce_call_timer(call, expect_req_by, now,
						rxrpc_timer_set_for_idle);
		}
	}

	/* Received data implicitly ACKs all of the request packets we sent
	 * when we're acting as a client.
	 */
	if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
	     state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
	    !rxrpc_receiving_reply(call))
		return;

	call->ackr_prev_seq = seq;

	hard_ack = READ_ONCE(call->rx_hard_ack);
	if (after(seq, hard_ack + call->rx_winsize)) {
		ack = RXRPC_ACK_EXCEEDS_WINDOW;
		ack_serial = serial;
		goto ack;
	}

	flags = sp->hdr.flags;
	if (flags & RXRPC_JUMBO_PACKET) {
		if (call->nr_jumbo_bad > 3) {
			ack = RXRPC_ACK_NOSPACE;
			ack_serial = serial;
			goto ack;
		}
		annotation = 1;
	}

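	/* The Rx/Tx ring is a power-of-two sized array, so the slot for a
	 * packet is just its sequence number masked down.
	 */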
next_subpacket:
	queued = false;
	ix = seq & RXRPC_RXTX_BUFF_MASK;
	len = skb->len;
	if (flags & RXRPC_JUMBO_PACKET)
		len = RXRPC_JUMBO_DATALEN;

	if (flags & RXRPC_LAST_PACKET) {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    seq != call->rx_top)
			return rxrpc_proto_abort("LSN", call, seq);
	} else {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    after_eq(seq, call->rx_top))
			return rxrpc_proto_abort("LSA", call, seq);
	}

	trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
	if (before_eq(seq, hard_ack)) {
		ack = RXRPC_ACK_DUPLICATE;
		ack_serial = serial;
		goto skip;
	}

	if (flags & RXRPC_REQUEST_ACK && !ack) {
		ack = RXRPC_ACK_REQUESTED;
		ack_serial = serial;
	}

	if (call->rxtx_buffer[ix]) {
		rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
		if (ack != RXRPC_ACK_DUPLICATE) {
			ack = RXRPC_ACK_DUPLICATE;
			ack_serial = serial;
		}
		immediate_ack = true;
		goto skip;
	}

	/* Queue the packet.  We use a couple of memory barriers here as need
	 * to make sure that rx_top is perceived to be set after the buffer
	 * pointer and that the buffer pointer is set after the annotation and
	 * the skb data.
	 *
	 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
	 * and also rxrpc_fill_out_ack().
	 */
	rxrpc_get_skb(skb, rxrpc_skb_rx_got);
	call->rxtx_annotations[ix] = annotation;
	smp_wmb();
	call->rxtx_buffer[ix] = skb;
	if (after(seq, call->rx_top)) {
		smp_store_release(&call->rx_top, seq);
	} else if (before(seq, call->rx_top)) {
		/* Send an immediate ACK if we fill in a hole */
		if (!ack) {
			ack = RXRPC_ACK_DELAY;
			ack_serial = serial;
		}
		immediate_ack = true;
	}
	if (flags & RXRPC_LAST_PACKET) {
		set_bit(RXRPC_CALL_RX_LAST, &call->flags);
		trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
	} else {
		trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
	}
	queued = true;

	if (after_eq(seq, call->rx_expect_next)) {
		if (after(seq, call->rx_expect_next)) {
			_net("OOS %u > %u", seq, call->rx_expect_next);
			ack = RXRPC_ACK_OUT_OF_SEQUENCE;
			ack_serial = serial;
		}
		call->rx_expect_next = seq + 1;
	}

skip:
	offset += len;
	if (flags & RXRPC_JUMBO_PACKET) {
		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
			return rxrpc_proto_abort("XJF", call, seq);
		offset += sizeof(struct rxrpc_jumbo_header);
		seq++;
		serial++;
		annotation++;
		if (flags & RXRPC_JUMBO_PACKET)
			annotation |= RXRPC_RX_ANNO_JLAST;
		if (after(seq, hard_ack + call->rx_winsize)) {
			ack = RXRPC_ACK_EXCEEDS_WINDOW;
			ack_serial = serial;
			if (!jumbo_bad) {
				call->nr_jumbo_bad++;
				jumbo_bad = true;
			}
			goto ack;
		}

		_proto("Rx DATA Jumbo %%%u", serial);
		goto next_subpacket;
	}

	if (queued && flags & RXRPC_LAST_PACKET && !ack) {
		ack = RXRPC_ACK_DELAY;
		ack_serial = serial;
	}

ack:
	if (ack)
		rxrpc_propose_ACK(call, ack, skew, ack_serial,
				  immediate_ack, true,
				  rxrpc_propose_ack_input_data);
	else
		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial,
				  false, true,
				  rxrpc_propose_ack_input_data);

	if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) {
		trace_rxrpc_notify_socket(call->debug_id, serial);
		rxrpc_notify_socket(call);
	}
	_leave(" [queued]");
}

/*
 * Process a requested ACK.
 */
static void rxrpc_input_requested_ack(struct rxrpc_call *call,
				      ktime_t resp_time,
				      rxrpc_serial_t orig_serial,
				      rxrpc_serial_t ack_serial)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	ktime_t sent_at;
	int ix;

	for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) {
		skb = call->rxtx_buffer[ix];
		if (!skb)
			continue;

		sent_at = skb->tstamp;
		smp_rmb(); /* Read timestamp before serial. */
		sp = rxrpc_skb(skb);
		if (sp->hdr.serial != orig_serial)
			continue;
		goto found;
	}

	return;

found:
	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
			   orig_serial, ack_serial, sent_at, resp_time);
}

/*
 * Process the response to a ping that we sent to find out if we lost an ACK.
 *
 * If we got back a ping response that indicates a lower tx_top than what we
 * had at the time of the ping transmission, we adjudge all the DATA packets
 * sent between the response tx_top and the ping-time tx_top to have been lost.
 */
static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
{
	rxrpc_seq_t top, bottom, seq;
	bool resend = false;

	spin_lock_bh(&call->lock);

	bottom = call->tx_hard_ack + 1;
	top = call->acks_lost_top;
	if (before(bottom, top)) {
		for (seq = bottom; before_eq(seq, top); seq++) {
			int ix = seq & RXRPC_RXTX_BUFF_MASK;
			u8 annotation = call->rxtx_annotations[ix];
			u8 anno_type = annotation & RXRPC_TX_ANNO_MASK;

			if (anno_type != RXRPC_TX_ANNO_UNACK)
				continue;
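			/* Rewrite the annotation type from UNACK to RETRANS,
			 * preserving the flag bits above the type mask.
			 */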
			annotation &= ~RXRPC_TX_ANNO_MASK;
			annotation |= RXRPC_TX_ANNO_RETRANS;
			call->rxtx_annotations[ix] = annotation;
			resend = true;
		}
	}

	spin_unlock_bh(&call->lock);

	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);
}

/*
 * Process a ping response.
 */
static void rxrpc_input_ping_response(struct rxrpc_call *call,
				      ktime_t resp_time,
				      rxrpc_serial_t orig_serial,
				      rxrpc_serial_t ack_serial)
{
	rxrpc_serial_t ping_serial;
	ktime_t ping_time;

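
	/* The ping serial is written after the ping time on the sending
	 * side, so read them in the reverse order here.
	 */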
	ping_time = call->ping_time;
	smp_rmb();
	ping_serial = call->ping_serial;

	if (orig_serial == call->acks_lost_ping)
		rxrpc_input_check_for_lost_ack(call);

	if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
	    before(orig_serial, ping_serial))
		return;
	clear_bit(RXRPC_CALL_PINGING, &call->flags);
	if (after(orig_serial, ping_serial))
		return;

	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
			   orig_serial, ack_serial, ping_time, resp_time);
}

/*
 * Process the extra information that may be appended to an ACK packet
 */
static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
				struct rxrpc_ackinfo *ackinfo)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;
	unsigned int mtu;
	bool wake = false;
	u32 rwind = ntohl(ackinfo->rwind);

	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
	       sp->hdr.serial,
	       ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
	       rwind, ntohl(ackinfo->jumbo_max));

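	/* Clamp the advertised receive window to the size of our Rx/Tx ring
	 * so that we never have more packets in flight than we can buffer.
	 */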
	if (call->tx_winsize != rwind) {
		if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
			rwind = RXRPC_RXTX_BUFF_SIZE - 1;
		if (rwind > call->tx_winsize)
			wake = true;
		trace_rxrpc_rx_rwind_change(call, sp->hdr.serial,
					    ntohl(ackinfo->rwind), wake);
		call->tx_winsize = rwind;
	}

	if (call->cong_ssthresh > rwind)
		call->cong_ssthresh = rwind;

	mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));

	peer = call->peer;
	if (mtu < peer->maxdata) {
		spin_lock_bh(&peer->lock);
		peer->maxdata = mtu;
		peer->mtu = mtu + peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
	}

	if (wake)
		wake_up(&call->waitq);
}

/*
 * Process individual soft ACKs.
 *
 * Each ACK in the array corresponds to one packet and can be either an ACK or
 * a NAK.  If we find an explicitly NAK'd packet we resend immediately;
 * packets that lie beyond the end of the ACK list are scheduled for resend by
 * the timer on the basis that the peer might just not have processed them at
 * the time the ACK was sent.
 */
static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
				  rxrpc_seq_t seq, int nr_acks,
				  struct rxrpc_ack_summary *summary)
{
	int ix;
	u8 annotation, anno_type;

	for (; nr_acks > 0; nr_acks--, seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		switch (*acks++) {
		case RXRPC_ACK_TYPE_ACK:
			summary->nr_acks++;
			if (anno_type == RXRPC_TX_ANNO_ACK)
				continue;
			summary->nr_new_acks++;
			call->rxtx_annotations[ix] =
				RXRPC_TX_ANNO_ACK | annotation;
			break;
		case RXRPC_ACK_TYPE_NACK:
			if (!summary->nr_nacks &&
			    call->acks_lowest_nak != seq) {
				call->acks_lowest_nak = seq;
				summary->new_low_nack = true;
			}
			summary->nr_nacks++;
			if (anno_type == RXRPC_TX_ANNO_NAK)
				continue;
			summary->nr_new_nacks++;
			if (anno_type == RXRPC_TX_ANNO_RETRANS)
				continue;
			call->rxtx_annotations[ix] =
				RXRPC_TX_ANNO_NAK | annotation;
			break;
		default:
			return rxrpc_proto_abort("SFT", call, 0);
		}
	}
}

/*
 * Process an ACK packet.
 *
 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
 * in the ACK array.  Anything before that is hard-ACK'd and may be discarded.
 *
 * A hard-ACK means that a packet has been processed and may be discarded; a
 * soft-ACK means that the packet may be discarded and retransmission
 * requested.  A phase is complete when all packets are hard-ACK'd.
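 *
 * On the wire an ACK packet is laid out as the wire header, then a struct
 * rxrpc_ackpacket, then nAcks single-byte soft-ACK/NAK flags, then three
 * bytes of padding and, if the packet is long enough, a trailing struct
 * rxrpc_ackinfo.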
 */
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
			    u16 skew)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	union {
		struct rxrpc_ackpacket ack;
		struct rxrpc_ackinfo info;
		u8 acks[RXRPC_MAXACKS];
	} buf;
	rxrpc_serial_t acked_serial;
	rxrpc_seq_t first_soft_ack, hard_ack;
	int nr_acks, offset, ioffset;

	_enter("");

	offset = sizeof(struct rxrpc_wire_header);
	if (skb_copy_bits(skb, offset, &buf.ack, sizeof(buf.ack)) < 0) {
		_debug("extraction failure");
		return rxrpc_proto_abort("XAK", call, 0);
	}
	offset += sizeof(buf.ack);

	acked_serial = ntohl(buf.ack.serial);
	first_soft_ack = ntohl(buf.ack.firstPacket);
	hard_ack = first_soft_ack - 1;
	nr_acks = buf.ack.nAcks;
	summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
			      buf.ack.reason : RXRPC_ACK__INVALID);

	trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
			   first_soft_ack, ntohl(buf.ack.previousPacket),
			   summary.ack_reason, nr_acks);

	if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
		rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
					  sp->hdr.serial);
	if (buf.ack.reason == RXRPC_ACK_REQUESTED)
		rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
					  sp->hdr.serial);

	if (buf.ack.reason == RXRPC_ACK_PING) {
		_proto("Rx ACK %%%u PING Request", sp->hdr.serial);
		rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
				  skew, sp->hdr.serial, true, true,
				  rxrpc_propose_ack_respond_to_ping);
	} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
				  skew, sp->hdr.serial, true, true,
				  rxrpc_propose_ack_respond_to_ack);
	}

	ioffset = offset + nr_acks + 3;
	if (skb->len >= ioffset + sizeof(buf.info)) {
		if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
			return rxrpc_proto_abort("XAI", call, 0);
		rxrpc_input_ackinfo(call, skb, &buf.info);
	}

	if (first_soft_ack == 0)
		return rxrpc_proto_abort("AK0", call, 0);

	/* Ignore ACKs unless we are or have just been transmitting. */
	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
	case RXRPC_CALL_SERVER_SEND_REPLY:
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		break;
	default:
		return;
	}

	/* Discard any out-of-order or duplicate ACKs. */
	if (before_eq(sp->hdr.serial, call->acks_latest)) {
		_debug("discard ACK %d <= %d",
		       sp->hdr.serial, call->acks_latest);
		return;
	}
	call->acks_latest_ts = skb->tstamp;
	call->acks_latest = sp->hdr.serial;

	if (before(hard_ack, call->tx_hard_ack) ||
	    after(hard_ack, call->tx_top))
		return rxrpc_proto_abort("AKW", call, 0);
	if (nr_acks > call->tx_top - hard_ack)
		return rxrpc_proto_abort("AKN", call, 0);

	if (after(hard_ack, call->tx_hard_ack))
		rxrpc_rotate_tx_window(call, hard_ack, &summary);

	if (nr_acks > 0) {
		if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
			return rxrpc_proto_abort("XSA", call, 0);
		rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
				      &summary);
	}

	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		rxrpc_end_tx_phase(call, false, "ETA");
		return;
	}

	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
	    RXRPC_TX_ANNO_LAST &&
	    summary.nr_acks == call->tx_top - hard_ack &&
	    rxrpc_is_client_call(call))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
				  false, true,
				  rxrpc_propose_ack_ping_for_lost_reply);

	return rxrpc_congestion_management(call, skb, &summary, acked_serial);
}

/*
 * Process an ACKALL packet.
 */
static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_proto("Rx ACKALL %%%u", sp->hdr.serial);

	rxrpc_rotate_tx_window(call, call->tx_top, &summary);
	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
		rxrpc_end_tx_phase(call, false, "ETL");
}

/*
 * Process an ABORT packet directed at a call.
 */
static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 wtmp;
	u32 abort_code = RX_CALL_DEAD;

	_enter("");

	if (skb->len >= 4 &&
	    skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
			  &wtmp, sizeof(wtmp)) >= 0)
		abort_code = ntohl(wtmp);

	trace_rxrpc_rx_abort(call, sp->hdr.serial, abort_code);

	_proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);

	if (rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
				      abort_code, -ECONNABORTED))
		rxrpc_notify_socket(call);
}

/*
 * Process an incoming call packet.
 */
static void rxrpc_input_call_packet(struct rxrpc_call *call,
				    struct sk_buff *skb, u16 skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned long timo;

	_enter("%p,%p", call, skb);

	timo = READ_ONCE(call->next_rx_timo);
	if (timo) {
		unsigned long now = jiffies, expect_rx_by;

		expect_rx_by = now + timo;
		WRITE_ONCE(call->expect_rx_by, expect_rx_by);
		rxrpc_reduce_call_timer(call, expect_rx_by, now,
					rxrpc_timer_set_for_normal);
	}

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_DATA:
		rxrpc_input_data(call, skb, skew);
		break;

	case RXRPC_PACKET_TYPE_ACK:
		rxrpc_input_ack(call, skb, skew);
		break;

	case RXRPC_PACKET_TYPE_BUSY:
		_proto("Rx BUSY %%%u", sp->hdr.serial);

		/* Just ignore BUSY packets from the server; the retry and
		 * lifespan timers will take care of business.  BUSY packets
		 * from the client don't make sense.
		 */
		break;

	case RXRPC_PACKET_TYPE_ABORT:
		rxrpc_input_abort(call, skb);
		break;

	case RXRPC_PACKET_TYPE_ACKALL:
		rxrpc_input_ackall(call, skb);
		break;

	default:
		break;
	}

	_leave("");
}

/*
 * Handle a new call on a channel implicitly completing the preceding call on
 * that channel.
 *
 * TODO: If callNumber > call_id + 1, renegotiate security.
 */
static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
					  struct rxrpc_call *call)
{
	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		rxrpc_call_completed(call);
		break;
	case RXRPC_CALL_COMPLETE:
		break;
	default:
		if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN)) {
			set_bit(RXRPC_CALL_EV_ABORT, &call->events);
			rxrpc_queue_call(call);
		}
		break;
	}

	trace_rxrpc_improper_term(call);
	__rxrpc_disconnect_call(conn, call);
	rxrpc_notify_socket(call);
}

/*
 * post connection-level events to the connection
 * - this includes challenges, responses, some aborts and call terminal packet
 *   retransmission.
 */
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
				      struct sk_buff *skb)
{
	_enter("%p,%p", conn, skb);

	skb_queue_tail(&conn->rx_queue, skb);
	rxrpc_queue_conn(conn);
}

/*
 * post endpoint-level events to the local endpoint
 * - this includes debug and version messages
 */
static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
				       struct sk_buff *skb)
{
	_enter("%p,%p", local, skb);

	skb_queue_tail(&local->event_queue, skb);
	rxrpc_queue_local(local);
}

/*
 * put a packet up for transport-level abort
 */
static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
	CHECK_SLAB_OKAY(&local->usage);

	skb_queue_tail(&local->reject_queue, skb);
	rxrpc_queue_local(local);
}

/*
 * Extract the wire header from a packet and translate the byte order.
 */
static noinline
int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
{
	struct rxrpc_wire_header whdr;

	/* dig out the RxRPC connection details */
	if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) {
		trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
				      tracepoint_string("bad_hdr"));
		return -EBADMSG;
	}

	memset(sp, 0, sizeof(*sp));
	sp->hdr.epoch		= ntohl(whdr.epoch);
	sp->hdr.cid		= ntohl(whdr.cid);
	sp->hdr.callNumber	= ntohl(whdr.callNumber);
	sp->hdr.seq		= ntohl(whdr.seq);
	sp->hdr.serial		= ntohl(whdr.serial);
	sp->hdr.flags		= whdr.flags;
	sp->hdr.type		= whdr.type;
	sp->hdr.userStatus	= whdr.userStatus;
	sp->hdr.securityIndex	= whdr.securityIndex;
	sp->hdr._rsvd		= ntohs(whdr._rsvd);
	sp->hdr.serviceId	= ntohs(whdr.serviceId);
	return 0;
}

/*
 * handle data received on the local endpoint
 * - may be called in interrupt context
 *
 * The socket is locked by the caller and this prevents the socket from being
 * shut down and the local endpoint from going away, thus sk_user_data will not
 * be cleared until this function returns.
 */
void rxrpc_data_ready(struct sock *udp_sk)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan;
	struct rxrpc_call *call = NULL;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_local *local = udp_sk->sk_user_data;
	struct rxrpc_sock *rx;
	struct sk_buff *skb;
	unsigned int channel;
	int ret, skew = 0;

	_enter("%p", udp_sk);

	ASSERT(!irqs_disabled());

	skb = skb_recv_udp(udp_sk, 0, 1, &ret);
	if (!skb) {
		if (ret == -EAGAIN)
			return;
		_debug("UDP socket error %d", ret);
		return;
	}

	if (skb->tstamp == 0)
		skb->tstamp = ktime_get_real();

	rxrpc_new_skb(skb, rxrpc_skb_rx_received);

	_net("recv skb %p", skb);

	/* we'll probably need to checksum it (didn't call sock_recvmsg) */
	if (skb_checksum_complete(skb)) {
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		__UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
		_leave(" [CSUM failed]");
		return;
	}

	__UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);

	/* The UDP protocol already released all skb resources;
	 * we are free to add our own data there.
	 */
	sp = rxrpc_skb(skb);

	/* dig out the RxRPC connection details */
	if (rxrpc_extract_header(sp, skb) < 0)
		goto bad_message;

	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;
		if ((lose++ & 7) == 7) {
			trace_rxrpc_rx_lose(sp);
			rxrpc_lose_skb(skb, rxrpc_skb_rx_lost);
			return;
		}
	}

	trace_rxrpc_rx_packet(sp);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_VERSION:
		if (rxrpc_to_client(sp))
			goto discard;
		rxrpc_post_packet_to_local(local, skb);
		goto out;

	case RXRPC_PACKET_TYPE_BUSY:
		if (rxrpc_to_server(sp))
			goto discard;
		/* Fall through */
	case RXRPC_PACKET_TYPE_ACK:
	case RXRPC_PACKET_TYPE_ACKALL:
		if (sp->hdr.callNumber == 0)
			goto bad_message;
		/* Fall through */
	case RXRPC_PACKET_TYPE_ABORT:
		break;

	case RXRPC_PACKET_TYPE_DATA:
		if (sp->hdr.callNumber == 0 ||
		    sp->hdr.seq == 0)
			goto bad_message;
		if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
		    !rxrpc_validate_jumbo(skb))
			goto bad_message;
		break;

	case RXRPC_PACKET_TYPE_CHALLENGE:
		if (rxrpc_to_server(sp))
			goto discard;
		break;
	case RXRPC_PACKET_TYPE_RESPONSE:
		if (rxrpc_to_client(sp))
			goto discard;
		break;

		/* Packet types 9-11 should just be ignored. */
	case RXRPC_PACKET_TYPE_PARAMS:
	case RXRPC_PACKET_TYPE_10:
	case RXRPC_PACKET_TYPE_11:
		goto discard;

	default:
		_proto("Rx Bad Packet Type %u", sp->hdr.type);
		goto bad_message;
	}

	if (sp->hdr.serviceId == 0)
		goto bad_message;

	rcu_read_lock();

	if (rxrpc_to_server(sp)) {
		/* Weed out packets to services we're not offering.  Packets
		 * that would begin a call are explicitly rejected and the rest
		 * are just discarded.
		 */
		rx = rcu_dereference(local->service);
		if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
			    sp->hdr.serviceId != rx->second_service)) {
			if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
			    sp->hdr.seq == 1)
				goto unsupported_service;
			goto discard_unlock;
		}
	}

	conn = rxrpc_find_connection_rcu(local, skb);
	if (conn) {
		if (sp->hdr.securityIndex != conn->security_ix)
			goto wrong_security;

		if (sp->hdr.serviceId != conn->service_id) {
			if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) ||
			    conn->service_id != conn->params.service_id)
				goto reupgrade;
			conn->service_id = sp->hdr.serviceId;
		}

		if (sp->hdr.callNumber == 0) {
			/* Connection-level packet */
			_debug("CONN %p {%d}", conn, conn->debug_id);
			rxrpc_post_packet_to_conn(conn, skb);
			goto out_unlock;
		}

		/* Note the serial number skew here; the clamp below keeps it
		 * within the u16 that is passed on with the packet.
		 */
		skew = (int)sp->hdr.serial - (int)conn->hi_serial;
		if (skew >= 0) {
			if (skew > 0)
				conn->hi_serial = sp->hdr.serial;
		} else {
			skew = -skew;
			skew = min(skew, 65535);
		}

		/* Call-bound packets are routed by connection channel. */
		channel = sp->hdr.cid & RXRPC_CHANNELMASK;
		chan = &conn->channels[channel];

		/* Ignore really old calls */
		if (sp->hdr.callNumber < chan->last_call)
			goto discard_unlock;

		if (sp->hdr.callNumber == chan->last_call) {
			if (chan->call ||
			    sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
				goto discard_unlock;

			/* For the previous service call, if completed
			 * successfully, we discard all further packets.
			 */
			if (rxrpc_conn_is_service(conn) &&
			    chan->last_type == RXRPC_PACKET_TYPE_ACK)
				goto discard_unlock;

			/* But otherwise we need to retransmit the final packet
			 * from data cached in the connection record.
			 */
			if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
				trace_rxrpc_rx_data(chan->call_debug_id,
						    sp->hdr.seq,
						    sp->hdr.serial,
						    sp->hdr.flags, 0);
			rxrpc_post_packet_to_conn(conn, skb);
			goto out_unlock;
		}

		call = rcu_dereference(chan->call);

		if (sp->hdr.callNumber > chan->call_id) {
			if (rxrpc_to_client(sp)) {
				rcu_read_unlock();
				goto reject_packet;
			}
			if (call)
				rxrpc_input_implicit_end_call(conn, call);
			call = NULL;
		}

		if (call) {
			if (sp->hdr.serviceId != call->service_id)
				call->service_id = sp->hdr.serviceId;
			if ((int)sp->hdr.serial - (int)call->rx_serial > 0)
				call->rx_serial = sp->hdr.serial;
			if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
				set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
		}
	}

	if (!call || atomic_read(&call->usage) == 0) {
		if (rxrpc_to_client(sp) ||
		    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
			goto bad_message_unlock;
		if (sp->hdr.seq != 1)
			goto discard_unlock;
		call = rxrpc_new_incoming_call(local, conn, skb);
		if (!call) {
			rcu_read_unlock();
			goto reject_packet;
		}
		rxrpc_send_ping(call, skb, skew);
		mutex_unlock(&call->user_mutex);
	}

	rxrpc_input_call_packet(call, skb, skew);
	goto discard_unlock;

discard_unlock:
	rcu_read_unlock();
discard:
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
out:
	trace_rxrpc_rx_done(0, 0);
	return;

out_unlock:
	rcu_read_unlock();
	goto out;

wrong_security:
	rcu_read_unlock();
	trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RXKADINCONSISTENCY, EBADMSG);
	skb->priority = RXKADINCONSISTENCY;
	goto post_abort;

unsupported_service:
	rcu_read_unlock();
	trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->priority = RX_INVALID_OPERATION;
	goto post_abort;

reupgrade:
	rcu_read_unlock();
	trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_PROTOCOL_ERROR, EBADMSG);
	goto protocol_error;

bad_message_unlock:
	rcu_read_unlock();
bad_message:
	trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_PROTOCOL_ERROR, EBADMSG);
protocol_error:
	skb->priority = RX_PROTOCOL_ERROR;
post_abort:
	skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
reject_packet:
	trace_rxrpc_rx_done(skb->mark, skb->priority);
	rxrpc_reject_packet(local, skb);
	_leave(" [badmsg]");
}