/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
static DEFINE_HASHTABLE(rxrpc_call_hash, 10);

/*
 * Hash function for rxrpc_call_hash
 */
static unsigned long rxrpc_call_hashfunc(
	u8		in_clientflag,
	u32		cid,
	u32		call_id,
	u32		epoch,
	u16		service_id,
	sa_family_t	family,
	void		*localptr,
	unsigned int	addr_size,
	const u8	*peer_addr)
{
	const u16 *p;
	unsigned int i;
	unsigned long key;

	_enter("");

	key = (unsigned long)localptr;
	/* We just want to add up the __be32 values, so forcing the
	 * cast should be okay.
	 */
	key += epoch;
	key += service_id;
	key += call_id;
	key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
	key += cid & RXRPC_CHANNELMASK;
	key += in_clientflag;
	key += family;
	/* Step through the peer address in 16-bit portions for speed */
	for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
		key += *p;
	_leave(" key = 0x%lx", key);
	return key;
}

/*
 * Add a call to the hashtable
 */
static void rxrpc_call_hash_add(struct rxrpc_call *call)
{
	unsigned long key;
	unsigned int addr_size = 0;

	_enter("");
	switch (call->family) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
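		/* Leave addr_size at 0: the key is then calculated over
		 * the protocol parameters only, with no peer address
		 * bytes mixed in.
		 */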
		break;
	}
	key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
				  call->call_id, call->epoch,
				  call->service_id, call->family,
				  call->conn->params.local, addr_size,
				  call->peer_ip.ipv6_addr);
	/* Store the full key in the call */
	call->hash_key = key;
	spin_lock(&rxrpc_call_hash_lock);
	hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}

/*
 * Remove a call from the hashtable
 */
static void rxrpc_call_hash_del(struct rxrpc_call *call)
{
	_enter("");
	spin_lock(&rxrpc_call_hash_lock);
	hash_del_rcu(&call->hash_node);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}

/*
 * Find a call in the hashtable and return it, or NULL if it
 * isn't there.
 */
struct rxrpc_call *rxrpc_find_call_hash(
	struct rxrpc_host_header *hdr,
	void		*localptr,
	sa_family_t	family,
	const void	*peer_addr)
{
	unsigned long key;
	unsigned int addr_size = 0;
	struct rxrpc_call *call = NULL;
	struct rxrpc_call *ret = NULL;
	u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED;

	_enter("");
	switch (family) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}

	key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber,
				  hdr->epoch, hdr->serviceId,
				  family, localptr, addr_size,
				  peer_addr);
	hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
		if (call->hash_key == key &&
		    call->call_id == hdr->callNumber &&
		    call->cid == hdr->cid &&
		    call->in_clientflag == in_clientflag &&
		    call->service_id == hdr->serviceId &&
		    call->family == family &&
		    call->local == localptr &&
		    memcmp(call->peer_ip.ipv6_addr, peer_addr,
			   addr_size) == 0 &&
		    call->epoch == hdr->epoch) {
			ret = call;
			break;
		}
	}
	_leave(" = %p", ret);
	return ret;
}

/*
 * find an extant call by user ID
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
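	/* The Tx window size must be a power of two, as the ring index is
	 * advanced with "& (acks_winsz - 1)" masks (see rxrpc_cleanup_call()).
	 */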
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

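	/* Poison the unused rbtree node so that use of the call before it is
	 * linked into a socket's call tree shows up clearly in a crash dump.
	 */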
	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}

/*
 * allocate a new client call and attempt to get a connection slot for it
 */
static struct rxrpc_call *rxrpc_alloc_client_call(
	struct rxrpc_sock *rx,
	struct rxrpc_conn_parameters *cp,
	struct rxrpc_transport *trans,
	struct rxrpc_conn_bundle *bundle,
	gfp_t gfp)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(rx != NULL);
	ASSERT(trans != NULL);
	ASSERT(bundle != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;

	ret = rxrpc_connect_call(rx, cp, trans, bundle, call, gfp);
	if (ret < 0) {
		kmem_cache_free(rxrpc_call_jar, call);
		return ERR_PTR(ret);
	}

	/* Record copies of information for hashtable lookup */
	call->family = rx->family;
	call->local = call->conn->params.local;
	switch (call->family) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			call->conn->params.peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       call->conn->params.peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	}
	call->epoch = call->conn->proto.epoch;
	call->service_id = call->conn->params.service_id;
	call->in_clientflag = call->conn->proto.in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);

	_leave(" = %p", call);
	return call;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;

	_enter("%p,%d,%d,%lx",
	       rx, trans->debug_id, bundle ? bundle->debug_id : -1,
	       user_call_ID);

	call = rxrpc_alloc_client_call(rx, cp, trans, bundle, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	u32 call_id;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket	= rx;
	candidate->conn		= conn;
	candidate->cid		= sp->hdr.cid;
	candidate->call_id	= sp->hdr.callNumber;
	candidate->channel	= sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->rx_data_post	= 0;
	candidate->state	= RXRPC_CALL_SERVER_ACCEPTING;
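	/* A call on a secured connection can't be passed up for acceptance
	 * until the security negotiation has been completed.
	 */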
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
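			/* fall through */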
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			conn->channels[call->channel] = NULL;
		} else {
			write_unlock_bh(&conn->lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check that the call number isn't a duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		call = rb_entry(parent, struct rxrpc_call, conn_node);

		/* The tree is sorted in order of the __be32 value without
		 * turning it into host order.
		 */
		if (call_id < call->call_id)
			p = &(*p)->rb_left;
		else if (call_id > call->call_id)
			p = &(*p)->rb_right;
		else
			goto old_call;
	}

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);
	conn->channels[call->channel] = call;
	sock_hold(&rx->sk);
	rxrpc_get_connection(conn);
	write_unlock_bh(&conn->lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	/* Record copies of information for hashtable lookup */
	call->family = rx->family;
	call->local = conn->params.local;
	switch (call->family) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			conn->params.peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       conn->params.peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	default:
		break;
	}
	call->epoch = conn->proto.epoch;
	call->service_id = conn->params.service_id;
	call->in_clientflag = conn->proto.in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	write_unlock_bh(&conn->lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	spin_lock(&conn->trans->client_lock);
	write_lock_bh(&conn->lock);
	write_lock(&call->state_lock);

	if (conn->channels[call->channel] == call)
		conn->channels[call->channel] = NULL;

	if (conn->out_clientflag && conn->bundle) {
		conn->avail_calls++;
		switch (conn->avail_calls) {
		case 1:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->avail_conns);
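			/* fall through */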
		case 2 ... RXRPC_MAXCALLS - 1:
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			break;
		case RXRPC_MAXCALLS:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->unused_conns);
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			break;
		default:
			pr_err("conn->avail_calls=%d\n", conn->avail_calls);
			BUG();
		}
	}

	spin_unlock(&conn->trans->client_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_CALL_DEAD;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
	write_unlock_bh(&conn->lock);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	if (call->conn) {
		spin_lock(&call->conn->params.peer->lock);
		hlist_del_init(&call->error_link);
		spin_unlock(&call->conn->params.peer->lock);

		write_lock_bh(&call->conn->lock);
		rb_erase(&call->conn_node, &call->conn->calls);
		write_unlock_bh(&call->conn->lock);
		rxrpc_put_connection(call->conn);
	}

	/* Remove the call from the hash */
	rxrpc_call_hash_del(call);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

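			/* The bottom bit of a Tx window entry is used as a
			 * flag bit, so mask it off to recover the sk_buff
			 * pointer.
			 */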
			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all outstanding call records rather than waiting for
 * them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
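			/* The last ref may be held by the deadspan timer; if
			 * the timer was still pending, run its expiry handler
			 * by hand now.
			 */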
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}