/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Propose a PING ACK be sent.
 */
static void rxrpc_propose_ping(struct rxrpc_call *call,
			       bool immediate, bool background)
{
	if (immediate) {
		if (background &&
		    !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
			rxrpc_queue_call(call);
	} else {
		unsigned long now = jiffies;
		unsigned long ping_at = now + rxrpc_idle_ack_delay;

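		/* Only pull the ping timer forward, never push it back. */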
		if (time_before(ping_at, call->ping_at)) {
			WRITE_ONCE(call->ping_at, ping_at);
			rxrpc_reduce_call_timer(call, ping_at, now,
						rxrpc_timer_set_for_ping);
		}
	}
}

/*
 * propose an ACK be sent
 */
static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
				u16 skew, u32 serial, bool immediate,
				bool background,
				enum rxrpc_propose_ack_trace why)
{
	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
	unsigned long expiry = rxrpc_soft_ack_delay;
	s8 prior = rxrpc_ack_priority[ack_reason];

	/* Pings are handled specially because we don't want to accidentally
	 * lose a ping response by subsuming it into a ping.
	 */
	if (ack_reason == RXRPC_ACK_PING) {
		rxrpc_propose_ping(call, immediate, background);
		goto trace;
	}

	/* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers, but we don't alter the timeout.
	 */
	_debug("prior %u %u vs %u %u",
	       ack_reason, prior,
	       call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]);
	if (ack_reason == call->ackr_reason) {
		if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
			outcome = rxrpc_propose_ack_update;
			call->ackr_serial = serial;
			call->ackr_skew = skew;
		}
		if (!immediate)
			goto trace;
	} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
		call->ackr_reason = ack_reason;
		call->ackr_serial = serial;
		call->ackr_skew = skew;
	} else {
		outcome = rxrpc_propose_ack_subsume;
	}

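	/* Work out how long this type of ACK may be deferred for; ACK reasons
	 * not listed here are sent immediately.
	 */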
	switch (ack_reason) {
	case RXRPC_ACK_REQUESTED:
		if (rxrpc_requested_ack_delay < expiry)
			expiry = rxrpc_requested_ack_delay;
		if (serial == 1)
			immediate = false;
		break;

	case RXRPC_ACK_DELAY:
		if (rxrpc_soft_ack_delay < expiry)
			expiry = rxrpc_soft_ack_delay;
		break;

	case RXRPC_ACK_IDLE:
		if (rxrpc_idle_ack_delay < expiry)
			expiry = rxrpc_idle_ack_delay;
		break;

	default:
		immediate = true;
		break;
	}

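	/* Unless an ACK is already scheduled, either raise the ACK event now
	 * (queueing the call if this is a background proposal) or pull the
	 * deferred-ACK timer forward.
	 */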
	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		_debug("already scheduled");
	} else if (immediate || expiry == 0) {
		_debug("immediate ACK %lx", call->events);
		if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events) &&
		    background)
			rxrpc_queue_call(call);
	} else {
		unsigned long now = jiffies, ack_at;

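		/* Defer the ACK by roughly one RTT if we have an estimate of
		 * it, otherwise by the default expiry for this ACK type.
		 */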
		if (call->peer->rtt_usage > 0)
			ack_at = nsecs_to_jiffies(call->peer->rtt);
		else
			ack_at = expiry;

		ack_at += now;
		if (time_before(ack_at, call->ack_at)) {
			WRITE_ONCE(call->ack_at, ack_at);
			rxrpc_reduce_call_timer(call, ack_at, now,
						rxrpc_timer_set_for_ack);
		}
	}

trace:
	trace_rxrpc_propose_ack(call, why, ack_reason, serial, immediate,
				background, outcome);
}

/*
 * propose an ACK be sent, locking the call structure
 */
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
		       u16 skew, u32 serial, bool immediate, bool background,
		       enum rxrpc_propose_ack_trace why)
{
	spin_lock_bh(&call->lock);
	__rxrpc_propose_ACK(call, ack_reason, skew, serial,
			    immediate, background, why);
	spin_unlock_bh(&call->lock);
}

/*
 * Handle congestion being detected by the retransmit timeout.
 */
static void rxrpc_congestion_timeout(struct rxrpc_call *call)
{
	set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
}

/*
 * Perform retransmission of NAK'd and unack'd packets.
 */
static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	unsigned long resend_at;
	rxrpc_seq_t cursor, seq, top;
	ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo;
	int ix;
	u8 annotation, anno_type, retrans = 0, unacked = 0;

	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);

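	/* Base the retransmission timeout on 1.5 x the estimated RTT if we
	 * have one, otherwise on the fixed resend timeout, clamped to a
	 * minimum of four jiffies.
	 */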
	if (call->peer->rtt_usage > 1)
		timeout = ns_to_ktime(call->peer->rtt * 3 / 2);
	else
		timeout = ms_to_ktime(rxrpc_resend_timeout);
	min_timeo = ns_to_ktime((1000000000 / HZ) * 4);
	if (ktime_before(timeout, min_timeo))
		timeout = min_timeo;

	now = ktime_get_real();
	max_age = ktime_sub(now, timeout);

	spin_lock_bh(&call->lock);

	cursor = call->tx_hard_ack;
	top = call->tx_top;
	ASSERT(before_eq(cursor, top));
	if (cursor == top)
		goto out_unlock;

	/* Scan the packet list without dropping the lock and decide which of
	 * the packets in the Tx buffer we're going to resend and what the new
	 * resend timeout will be.
	 */
	oldest = now;
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		if (anno_type == RXRPC_TX_ANNO_ACK)
			continue;

		skb = call->rxtx_buffer[ix];
		rxrpc_see_skb(skb, rxrpc_skb_tx_seen);
		sp = rxrpc_skb(skb);

		if (anno_type == RXRPC_TX_ANNO_UNACK) {
			if (ktime_after(skb->tstamp, max_age)) {
				if (ktime_before(skb->tstamp, oldest))
					oldest = skb->tstamp;
				continue;
			}
			if (!(annotation & RXRPC_TX_ANNO_RESENT))
				unacked++;
		}

		/* Okay, we need to retransmit a packet. */
		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
		retrans++;
		trace_rxrpc_retransmit(call, seq, annotation | anno_type,
				       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
	}

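	/* Work out the new resend time from the transmission time of the
	 * oldest unacknowledged packet that we didn't just mark for
	 * retransmission.
	 */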
	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
	resend_at += jiffies + rxrpc_resend_timeout;
	WRITE_ONCE(call->resend_at, resend_at);

	if (unacked)
		rxrpc_congestion_timeout(call);

	/* If there was nothing that needed retransmission then it's likely
	 * that an ACK got lost somewhere.  Send a ping to find out instead of
	 * retransmitting data.
	 */
	if (!retrans) {
		rxrpc_reduce_call_timer(call, resend_at, now_j,
					rxrpc_timer_set_for_resend);
		spin_unlock_bh(&call->lock);
		ack_ts = ktime_sub(now, call->acks_latest_ts);
		if (ktime_to_ns(ack_ts) < call->peer->rtt)
			goto out;
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
		rxrpc_send_ack_packet(call, true, NULL);
		goto out;
	}

	/* Now go through the Tx window and perform the retransmissions.  We
	 * have to drop the lock for each send.  If an ACK comes in whilst the
	 * lock is dropped, it may clear some of the retransmission markers for
	 * packets that it soft-ACKs.
	 */
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		if (anno_type != RXRPC_TX_ANNO_RETRANS)
			continue;

		skb = call->rxtx_buffer[ix];
		rxrpc_get_skb(skb, rxrpc_skb_tx_got);
		spin_unlock_bh(&call->lock);

		if (rxrpc_send_data_packet(call, skb, true) < 0) {
			rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
			return;
		}

		if (rxrpc_is_client_call(call))
			rxrpc_expose_client_call(call);

		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
		spin_lock_bh(&call->lock);

		/* We need to clear the retransmit state, but there are two
		 * things we need to be aware of: A new ACK/NAK might have been
		 * received and the packet might have been hard-ACK'd (in which
		 * case it will no longer be in the buffer).
		 */
		if (after(seq, call->tx_hard_ack)) {
			annotation = call->rxtx_annotations[ix];
			anno_type = annotation & RXRPC_TX_ANNO_MASK;
			if (anno_type == RXRPC_TX_ANNO_RETRANS ||
			    anno_type == RXRPC_TX_ANNO_NAK) {
				annotation &= ~RXRPC_TX_ANNO_MASK;
				annotation |= RXRPC_TX_ANNO_UNACK;
			}
			annotation |= RXRPC_TX_ANNO_RESENT;
			call->rxtx_annotations[ix] = annotation;
		}

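		/* The hard-ACK point may have advanced whilst the lock was
		 * dropped; skip over anything that has since been hard-ACK'd.
		 */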
		if (after(call->tx_hard_ack, seq))
			seq = call->tx_hard_ack;
	}

out_unlock:
	spin_unlock_bh(&call->lock);
out:
	_leave("");
}

/*
 * Handle retransmission and deferred ACK/abort generation.
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	rxrpc_serial_t *send_ack;
	unsigned long now, next, t;

	rxrpc_see_call(call);

	//printk("\n--------------------\n");
	_enter("{%d,%s,%lx}",
	       call->debug_id, rxrpc_call_states[call->state], call->events);

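	/* Deal with deferred aborts and call completion first; the remaining
	 * timeout and event processing only applies to incomplete calls.
	 */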
recheck_state:
	if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
		rxrpc_send_abort_packet(call);
		goto recheck_state;
	}

	if (call->state == RXRPC_CALL_COMPLETE) {
		del_timer_sync(&call->timer);
		rxrpc_notify_socket(call);
		goto out_put;
	}

	/* Work out if any timeouts tripped */
	now = jiffies;
	t = READ_ONCE(call->expect_rx_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->expect_req_by);
	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
	    time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->expect_term_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->ack_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
		cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_ACK, &call->events);
	}

	t = READ_ONCE(call->ack_lost_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
		cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
	}

	t = READ_ONCE(call->ping_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
		cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_PING, &call->events);
	}

	t = READ_ONCE(call->resend_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
		cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
	}

	/* Process events */
	if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
		rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		goto recheck_state;
	}

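	/* If we suspect an ACK got lost, note the current Tx top and arrange
	 * for the serial number of the ACK we send to be recorded so that it
	 * can be matched up against the response.
	 */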
	send_ack = NULL;
	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
		call->acks_lost_top = call->tx_top;
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
		send_ack = &call->acks_lost_ping;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
	    send_ack) {
		if (call->ackr_reason) {
			rxrpc_send_ack_packet(call, false, send_ack);
			goto recheck_state;
		}
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
		rxrpc_send_ack_packet(call, true, NULL);
		goto recheck_state;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
		rxrpc_resend(call, now);
		goto recheck_state;
	}

	/* Make sure the timer is restarted */
	next = call->expect_rx_by;

#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
	
	set(call->expect_req_by);
	set(call->expect_term_by);
	set(call->ack_at);
	set(call->ack_lost_at);
	set(call->resend_at);
	set(call->ping_at);

	now = jiffies;
	if (time_after_eq(now, next))
		goto recheck_state;

	rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);

	/* other events may have been raised since we started checking */
	if (call->events && call->state < RXRPC_CALL_COMPLETE) {
		__rxrpc_queue_call(call);
		goto out;
	}

out_put:
	rxrpc_put_call(call, rxrpc_call_put);
out:
	_leave("");
}