/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

struct kmem_cache *rxrpc_call_jar;

static void rxrpc_call_timer_expired(struct timer_list *t)
{
	struct rxrpc_call *call = from_timer(call, t, timer);

	_enter("%d", call->debug_id);

	if (call->state < RXRPC_CALL_COMPLETE) {
		trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
		rxrpc_queue_call(call);
	}
}

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}
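
/* Usage sketch (hypothetical caller, not taken from this file): a
 * sendmsg()-style path would resolve a user-supplied ID to a call and
 * drop the ref once done with it:
 *
 *	call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
 *	if (!call)
 *		return -EBADSLT;
 *	...
 *	rxrpc_put_call(call, rxrpc_call_put);
 *
 * The lookup takes its own ref under read_lock(&rx->call_lock), so a
 * non-NULL result is always owned by the caller.
 */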

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
				    sizeof(struct sk_buff *),
				    gfp);
	if (!call->rxtx_buffer)
		goto nomem;

	call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
	if (!call->rxtx_annotations)
		goto nomem_2;

	mutex_init(&call->user_mutex);

	/* Prevent lockdep reporting a deadlock false positive between the afs
	 * filesystem and sys_sendmsg() via the mmap sem.
	 */
	if (rx->sk.sk_kern_sock)
		lockdep_set_class(&call->user_mutex,
				  &rxrpc_call_user_mutex_lock_class_key);

	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	spin_lock_init(&call->notify_lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	call->tx_total_len = -1;
	call->next_rx_timo = 20 * HZ;
	call->next_req_timo = 1 * HZ;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	/* Leave space in the ring to handle a maxed-out jumbo packet */
	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;
	call->rx_expect_next = 1;

	call->cong_cwnd = 2;
	call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
	return call;

nomem_2:
	kfree(call->rxtx_buffer);
nomem:
	kmem_cache_free(rxrpc_call_jar, call);
	return NULL;
}
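
/* Note on the allocations above: rxtx_buffer and rxtx_annotations are
 * parallel arrays of RXRPC_RXTX_BUFF_SIZE slots forming the combined
 * Rx/Tx sliding-window ring; each skb slot has a matching one-byte
 * annotation carrying per-packet state such as the last-packet and
 * retransmission flags used elsewhere in this file.
 */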

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;
	ktime_t now;

	_enter("");

	call = rxrpc_alloc_call(rx, gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->service_id = srx->srx_service;
	call->tx_phase = true;
	now = ktime_get_real();
	call->acks_latest_ts = now;
	call->cong_tstamp = now;

	_leave(" = %p", call);
	return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	unsigned long now = jiffies;
	unsigned long j = now + MAX_JIFFY_OFFSET;

	call->ack_at = j;
	call->ack_lost_at = j;
	call->resend_at = j;
	call->ping_at = j;
	call->expect_rx_by = j;
	call->expect_req_by = j;
	call->expect_term_by = j;
	call->timer.expires = now;
}
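
/* All of the deadlines above start out parked at now + MAX_JIFFY_OFFSET,
 * i.e. effectively "never": the ACK, resend, ping, idle-Rx and hard
 * expiry times only become real when later code pulls them forward, at
 * which point rxrpc_call_timer_expired() queues the call's work item.
 */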

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 struct rxrpc_call_params *p,
					 gfp_t gfp)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, p->user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	call->tx_total_len = p->tx_total_len;
	trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
			 here, (const void *)p->user_call_ID);

	/* We need to protect a partially set up call against the user as we
	 * will be acting outside the socket lock.
	 */
	mutex_lock(&call->user_mutex);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (p->user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (p->user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = p->user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	/* From this point on, the call is protected by its own lock. */
	release_sock(&rx->sk);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
			 here, NULL);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	release_sock(&rx->sk);
	ret = -EEXIST;

error:
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
			 here, ERR_PTR(ret));
	rxrpc_release_call(rx, call);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
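
/* For reference: in-kernel services such as the AFS filesystem are not
 * expected to call rxrpc_new_client_call() directly, but rather to go
 * through the rxrpc_kernel_begin_call() wrapper in af_rxrpc.c, which
 * takes the socket lock and fills in the rxrpc_call_params before
 * landing here.
 */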

/*
 * Retry a call to a new address.  It is expected that the Tx queue of the call
 * will contain data previously packaged for an old call.
 */
int rxrpc_retry_client_call(struct rxrpc_sock *rx,
			    struct rxrpc_call *call,
			    struct rxrpc_conn_parameters *cp,
			    struct sockaddr_rxrpc *srx,
			    gfp_t gfp)
{
	const void *here = __builtin_return_address(0);
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
			 here, NULL);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);

	_leave(" = 0");
	return 0;

error:
	rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				  RX_CALL_DEAD, ret);
	trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
			 here, ERR_PTR(ret));
	_leave(" = %d", ret);
	return ret;
}

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id		= sp->hdr.callNumber;
	call->service_id	= sp->hdr.serviceId;
	call->cid		= sp->hdr.cid;
	call->state		= RXRPC_CALL_SERVER_ACCEPTING;
	if (sp->hdr.securityIndex > 0)
		call->state	= RXRPC_CALL_SERVER_SECURING;
	call->cong_tstamp	= skb->tstamp;

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	rxrpc_start_call_timer(call);
	_leave("");
}

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = __atomic_add_unless(&call->usage, 1, 0);
	if (n == 0)
		return false;
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_read(&call->usage);
	ASSERTCMP(n, >=, 1);
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}
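
/* The two variants above differ only in ref handling: rxrpc_queue_call()
 * conditionally takes a new ref (refusing if the count has already hit
 * zero) and gives that ref to the work item, while __rxrpc_queue_call()
 * donates the ref the caller already holds.  Either way, a failed queue
 * attempt drops the ref again via rxrpc_call_put_noqueue.
 */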

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	if (call) {
		int n = atomic_read(&call->usage);

		trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);

	trace_rxrpc_call(call, op, n, here, NULL);
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_connection *conn = call->conn;
	bool put = false;
	int i;

	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

	trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
			 here, (const void *)call->flags);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	del_timer_sync(&call->timer);

	/* Make sure we don't get any more notifications */
	write_lock_bh(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	write_unlock_bh(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (conn)
		rxrpc_disconnect_call(call);

	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
		rxrpc_free_skb(call->rxtx_buffer[i],
			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
				rxrpc_skb_rx_cleaned));
		call->rxtx_buffer[i] = NULL;
	}

	_leave("");
}
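
/* Releasing a call only detaches it from the socket and connection; the
 * structure itself stays around until the last ref goes in
 * rxrpc_put_call(), which hands it to rxrpc_cleanup_call() and, from
 * there, to call_rcu() for the final free.
 */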

/*
 * Prepare a kernel service call for retry.
 */
int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int i;
	u8 last = 0;

	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

	trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
			 here, (const void *)call->flags);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED);
	ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED);
	ASSERT(list_empty(&call->recvmsg_link));

	del_timer_sync(&call->timer);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn);

	if (call->conn)
		rxrpc_disconnect_call(call);

	if (rxrpc_is_service_call(call) ||
	    !call->tx_phase ||
	    call->tx_hard_ack != 0 ||
	    call->rx_hard_ack != 0 ||
	    call->rx_top != 0)
		return -EINVAL;

	call->state = RXRPC_CALL_UNINITIALISED;
	call->completion = RXRPC_CALL_SUCCEEDED;
	call->call_id = 0;
	call->cid = 0;
	call->cong_cwnd = 0;
	call->cong_extra = 0;
	call->cong_ssthresh = 0;
	call->cong_mode = 0;
	call->cong_dup_acks = 0;
	call->cong_cumul_acks = 0;
	call->acks_lowest_nak = 0;

	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
		last |= call->rxtx_annotations[i];
		call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST;
		call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS;
	}

	_leave(" = 0");
	return 0;
}
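
/* Pairing note: a kernel-side user is expected to run
 * rxrpc_prepare_call_for_retry() first to reset the call state and mark
 * every Tx slot for retransmission, then rxrpc_retry_client_call() above
 * to attach the call to a fresh connection and requeue the resend work.
 */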

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

	while (!list_empty(&rx->to_be_accepted)) {
		call = list_entry(rx->to_be_accepted.next,
				  struct rxrpc_call, accept_link);
		list_del(&call->accept_link);
		rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_got);
		rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	_leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	struct rxrpc_net *rxnet;
	const void *here = __builtin_return_address(0);
	int n;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(call, op, n, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

		if (!list_empty(&call->link)) {
			rxnet = rxrpc_net(sock_net(&call->socket->sk));
			write_lock(&rxnet->call_lock);
			list_del_init(&call->link);
			write_unlock(&rxnet->call_lock);
		}

		rxrpc_cleanup_call(call);
	}
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_put_peer(call->peer);
	kfree(call->rxtx_buffer);
	kfree(call->rxtx_annotations);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	int i;

	_net("DESTROY CALL %d", call->debug_id);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->timer);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->conn, ==, NULL);

	/* Clean up the Rx/Tx buffer */
	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
		rxrpc_free_skb(call->rxtx_buffer[i],
			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
				rxrpc_skb_rx_cleaned));

	rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);

	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
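
/* The RCU deferral above matters because call pointers are published
 * with rcu_assign_pointer() (see rxrpc_incoming_call() and
 * rxrpc_new_client_call()): a reader still traversing one of those
 * pointers must not see the rxrpc_call freed under it.
 */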

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
	struct rxrpc_call *call;

	_enter("");

	if (list_empty(&rxnet->calls))
		return;

	write_lock(&rxnet->call_lock);

	while (!list_empty(&rxnet->calls)) {
		call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);

		pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
		       call, atomic_read(&call->usage),
		       rxrpc_call_states[call->state],
		       call->flags, call->events);

		write_unlock(&rxnet->call_lock);
		cond_resched();
		write_lock(&rxnet->call_lock);
	}

	write_unlock(&rxnet->call_lock);
}