/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

/* Textual names for the call states, indexed by enum rxrpc_call_state. */
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

/* Textual names for the reasons a call completed. */
const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

struct kmem_cache *rxrpc_call_jar;	/* slab cache for struct rxrpc_call */
LIST_HEAD(rxrpc_calls);			/* list of every extant call */
DEFINE_RWLOCK(rxrpc_call_lock);		/* protects rxrpc_calls */

/* Deferred-destruction work item and per-call timer callbacks (defined
 * below); forward-declared so rxrpc_alloc_call() can wire them up.
 */
static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);
68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104
/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133
/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
134
	INIT_LIST_HEAD(&call->link);
135
	INIT_LIST_HEAD(&call->chan_wait_link);
136 137 138
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
139
	skb_queue_head_init(&call->knlrecv_queue);
140
	init_waitqueue_head(&call->waitq);
141 142 143 144 145 146 147 148 149 150
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
151
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
152 153 154 155 156
	call->creation_jif = jiffies;
	return call;
}

/*
157
 * Allocate a new client call.
158
 */
159 160 161
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
162 163 164 165 166
{
	struct rxrpc_call *call;

	_enter("");

167
	ASSERT(rx->local != NULL);
168 169 170 171

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
172
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
173 174 175 176

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;
177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195
	call->service_id = srx->srx_service;

	_leave(" = %p", call);
	return call;
}

/*
 * Begin client call.
 *
 * Binds the call to a connection (creating one if needed), links the call
 * onto the peer's error-distribution list and arms the maximum-lifetime
 * timer.  Returns 0 or a negative error from rxrpc_connect_call().
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	/* Hook the call up so network errors reported against the peer are
	 * propagated to it.
	 */
	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	/* Start the clock on the call's maximum lifetime. */
	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 *
 * Allocates a client call, publishes it under user_call_ID in the socket's
 * rb-tree and on the global call list, then connects it.  Returns the call,
 * ERR_PTR(-EEXIST) if the ID is already in use, or another ERR_PTR on
 * allocation/connection failure.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	trace_rxrpc_call(call, 0, atomic_read(&call->usage), 0, here,
			 (const void *)user_call_ID);

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	/* Find the insertion point in the socket's rb-tree, keyed by the
	 * user-supplied call ID.
	 */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	/* The rb-tree holds its own ref on the call. */
	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	/* Unwind the publication: drop the rb-tree ref, unlink from the
	 * global list and release the caller's ref.
	 */
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
310
				       struct sk_buff *skb)
311
{
312
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
313
	struct rxrpc_call *call, *candidate;
D
David Howells 已提交
314
	const void *here = __builtin_return_address(0);
315
	u32 call_id, chan;
316

317
	_enter(",%d", conn->debug_id);
318 319 320

	ASSERT(rx != NULL);

321
	candidate = rxrpc_alloc_call(GFP_NOIO);
322 323 324
	if (!candidate)
		return ERR_PTR(-EBUSY);

D
David Howells 已提交
325 326 327
	trace_rxrpc_call(candidate, 1, atomic_read(&candidate->usage),
			 0, here, NULL);

328
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
329 330
	candidate->socket	= rx;
	candidate->conn		= conn;
331
	candidate->peer		= conn->params.peer;
332 333 334 335
	candidate->cid		= sp->hdr.cid;
	candidate->call_id	= sp->hdr.callNumber;
	candidate->rx_data_post	= 0;
	candidate->state	= RXRPC_CALL_SERVER_ACCEPTING;
336
	candidate->flags	|= (1 << RXRPC_CALL_IS_SERVICE);
337 338 339
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

340
	spin_lock(&conn->channel_lock);
341 342

	/* set the channel for this call */
343 344 345
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

346
	_debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
347
	if (call && call->call_id == sp->hdr.callNumber) {
348 349 350 351 352 353 354
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
355
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
356
				rxrpc_queue_call(call);
357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

373
		if (call->state == RXRPC_CALL_COMPLETE) {
374
			__rxrpc_disconnect_call(conn, call);
375
		} else {
376
			spin_unlock(&conn->channel_lock);
377 378 379 380 381 382 383 384
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
385
	call_id = sp->hdr.callNumber;
386 387 388 389 390 391

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */
392 393 394 395 396

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
397 398
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
399
	sock_hold(&rx->sk);
400
	rxrpc_get_connection(conn);
401
	rxrpc_get_peer(call->peer);
402
	spin_unlock(&conn->channel_lock);
403

404 405 406
	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);
407 408 409 410 411

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

412
	call->service_id = conn->params.service_id;
413

414 415
	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

416
	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
417 418 419 420 421
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
422
	spin_unlock(&conn->channel_lock);
423 424 425 426 427
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
428
	spin_unlock(&conn->channel_lock);
429 430 431 432 433
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
434
	spin_unlock(&conn->channel_lock);
435 436 437 438 439
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

D
David Howells 已提交
440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477
/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	if (call) {
		int n = atomic_read(&call->usage);
		int m = atomic_read(&call->skb_count);

		trace_rxrpc_call(call, 2, n, m, here, 0);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);
	int m = atomic_read(&call->skb_count);

	trace_rxrpc_call(call, 3, n, m, here, 0);
}

/*
 * Note the addition of a ref on a call for a socket buffer.
 */
void rxrpc_get_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int usage = atomic_inc_return(&call->usage);
	int skb_count = atomic_inc_return(&call->skb_count);

	trace_rxrpc_call(call, 4, usage, skb_count, here, skb);
}

/*
 * detach a call from a socket and set up for release
 *
 * Marks the call RELEASED, unhooks it from the peer's error list and the
 * socket's accept queue / rb-tree, aborts it if still in progress,
 * disconnects it from its connection, purges the Rx queues and hands the
 * socket's ref on the call to the deadspan timer.
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	rxrpc_see_call(call);

	/* Releasing an already-released call is a bug. */
	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	/* Stop peer-level errors being delivered to this call.
	 * NOTE(review): conn is dereferenced unconditionally here - presumes
	 * the call was connected before release; confirm against callers.
	 */
	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		/* Poison the node so stale use shows up clearly. */
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&call->state_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		__rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
	}
	write_unlock_bh(&call->state_lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		/* Drop the lock around each free as rxrpc_free_skb() may
		 * sleep or re-enter.
		 */
		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			spin_unlock_bh(&call->lock);

			sp = rxrpc_skb(skb);
			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);
	}

	/* Quiesce the other timers, then arm the dead-call reaper; it will
	 * consume the ref the socket held on the call.
	 */
	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}

/*
 * handle a dead call being ready for reaping
 *
 * Deadspan timer callback: moves the call to the DEAD state and drops the
 * ref that rxrpc_release_call() passed to the timer.
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 *
 * Queues the call's processor work item if the abort or the RELEASE event
 * needs servicing.
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched = false;

	rxrpc_see_call(call);
	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		/* __rxrpc_abort_call() says whether the processor needs to
		 * run; raising EV_RELEASE may independently require it.
		 */
		sched = __rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
	}
	write_unlock(&call->state_lock);
	if (sched)
		rxrpc_queue_call(call);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

622 623 624 625 626 627
	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

628 629 630 631 632 633 634
	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 *
 * Drops one ref; when the count hits zero the call must already be DEAD
 * with no skb refs outstanding, and destruction is punted to a work item.
 */
void rxrpc_put_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n, m;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	m = atomic_read(&call->skb_count);
	trace_rxrpc_call(call, 5, n, m, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		/* All skb refs should have gone before the last call ref. */
		WARN_ON(m != 0);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
}

D
David Howells 已提交
654 655 656 657 658 659 660 661 662 663 664 665 666
/*
 * Release a call ref held by a socket buffer.
 */
void rxrpc_put_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n, m;

	n = atomic_dec_return(&call->usage);
	m = atomic_dec_return(&call->skb_count);
	trace_rxrpc_call(call, 6, n, m, here, skb);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
667
		_debug("call %d dead", call->debug_id);
D
David Howells 已提交
668
		WARN_ON(m != 0);
669
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
670
		rxrpc_queue_work(&call->destroyer);
671 672 673
	}
}

674 675 676 677 678 679 680 681
/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
682
	rxrpc_purge_queue(&call->knlrecv_queue);
683
	rxrpc_put_peer(call->peer);
684 685 686
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 *
 * Tears down a released, refcount-zero call: kills the timers, drains the
 * Tx ACK window and queues, drops the socket ref and schedules the final
 * RCU free.  Re-queues itself if the processor work item is still pending.
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	/* Poison the (already-unlinked) rb node to catch stale use. */
	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		/* The processor may still touch the call; retry later. */
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	ASSERTCMP(call->conn, ==, NULL);

	/* Free any skbs still held in the Tx ACK circular window.  The
	 * low bit of each slot appears to be a flag and is masked off
	 * before the value is used as an skb pointer.
	 */
	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	rxrpc_purge_queue(&call->knlrecv_queue);
	/* Release the socket ref taken when the call was bound to it. */
	sock_put(&call->socket->sk);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

751 752
	_enter("%p{%d,%x,%p}",
	       call, atomic_read(&call->usage), call->cid, call->conn);
753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 *
 * Module-exit path: walks the global call list, forcing any call whose only
 * remaining ref belongs to the deadspan timer through the dead-call path,
 * and loudly reporting any call still genuinely in use.
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			/* Only the deadspan timer's ref remains; if we manage
			 * to cancel the timer, run its expiry by hand.
			 */
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
			/* fall through - still live, so complain below */
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		/* Let the destroyer work items make progress. */
		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

821 822
	_enter("{%d}", call->debug_id);

D
David Howells 已提交
823
	rxrpc_see_call(call);
824 825 826
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

827 828
	set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
	rxrpc_queue_call(call);
829 830 831 832
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 *
 * Clears the RUN_RTIMER flag and raises the RESEND_TIMER event, queueing
 * the call processor if the event wasn't already pending.
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	/* Unlocked state read - see the lock-ordering note above. */
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

D
David Howells 已提交
859
	rxrpc_see_call(call);
860 861 862
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

863
	if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
864
		rxrpc_queue_call(call);
865
}