call_object.c
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

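/*
 * Textual representations of the states a call can be in.
 */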
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

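/* Slab cache from which struct rxrpc_call objects are allocated. */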
struct kmem_cache *rxrpc_call_jar;

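/*
 * The call timer has expired: queue the call's work processor unless the call
 * has already completed.
 */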
static void rxrpc_call_timer_expired(struct timer_list *t)
{
	struct rxrpc_call *call = from_timer(call, t, timer);

	_enter("%d", call->debug_id);

	if (call->state < RXRPC_CALL_COMPLETE) {
		trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
		rxrpc_queue_call(call);
	}
}

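/*
 * Lock class applied to call->user_mutex when the parent socket is a kernel
 * socket, so that lockdep doesn't report a false-positive deadlock (see
 * rxrpc_alloc_call()).
 */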
static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

/*
 * find an extant call by user call ID
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
				    unsigned int debug_id)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
				    sizeof(struct sk_buff *),
				    gfp);
	if (!call->rxtx_buffer)
		goto nomem;

	call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
	if (!call->rxtx_annotations)
		goto nomem_2;

	mutex_init(&call->user_mutex);

	/* Prevent lockdep reporting a deadlock false positive between the afs
	 * filesystem and sys_sendmsg() via the mmap sem.
	 */
	if (rx->sk.sk_kern_sock)
		lockdep_set_class(&call->user_mutex,
				  &rxrpc_call_user_mutex_lock_class_key);

	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	spin_lock_init(&call->notify_lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = debug_id;
	call->tx_total_len = -1;
	call->next_rx_timo = 20 * HZ;
	call->next_req_timo = 1 * HZ;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	/* Leave space in the ring to handle a maxed-out jumbo packet */
	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;
	call->rx_expect_next = 1;

	call->cong_cwnd = 2;
	call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
	return call;

nomem_2:
	kfree(call->rxtx_buffer);
nomem:
	kmem_cache_free(rxrpc_call_jar, call);
	return NULL;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp,
						  unsigned int debug_id)
{
	struct rxrpc_call *call;
	ktime_t now;

	_enter("");

	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->service_id = srx->srx_service;
	call->tx_phase = true;
	now = ktime_get_real();
	call->acks_latest_ts = now;
	call->cong_tstamp = now;

	_leave(" = %p", call);
	return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	unsigned long now = jiffies;
	unsigned long j = now + MAX_JIFFY_OFFSET;

	call->ack_at = j;
	call->ack_lost_at = j;
	call->resend_at = j;
	call->ping_at = j;
	call->expect_rx_by = j;
	call->expect_req_by = j;
	call->expect_term_by = j;
	call->timer.expires = now;
}

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 struct rxrpc_call_params *p,
					 gfp_t gfp,
					 unsigned int debug_id)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, p->user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	call->tx_total_len = p->tx_total_len;
	trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
			 here, (const void *)p->user_call_ID);

	/* We need to protect a partially set up call against the user as we
	 * will be acting outside the socket lock.
	 */
	mutex_lock(&call->user_mutex);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (p->user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (p->user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = p->user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	/* From this point on, the call is protected by its own lock. */
	release_sock(&rx->sk);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
			 here, NULL);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	release_sock(&rx->sk);
	ret = -EEXIST;

error:
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
			 here, ERR_PTR(ret));
	rxrpc_release_call(rx, call);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Retry a call to a new address.  It is expected that the Tx queue of the call
 * will contain data previously packaged for an old call.
 */
int rxrpc_retry_client_call(struct rxrpc_sock *rx,
			    struct rxrpc_call *call,
			    struct rxrpc_conn_parameters *cp,
			    struct sockaddr_rxrpc *srx,
			    gfp_t gfp)
{
	const void *here = __builtin_return_address(0);
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
			 here, NULL);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);

	_leave(" = 0");
	return 0;

error:
	rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				  RX_CALL_DEAD, ret);
	trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
			 here, ERR_PTR(ret));
	_leave(" = %d", ret);
	return ret;
}

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id		= sp->hdr.callNumber;
	call->service_id	= sp->hdr.serviceId;
	call->cid		= sp->hdr.cid;
	call->state		= RXRPC_CALL_SERVER_ACCEPTING;
	if (sp->hdr.securityIndex > 0)
		call->state	= RXRPC_CALL_SERVER_SECURING;
	call->cong_tstamp	= skb->tstamp;

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	rxrpc_start_call_timer(call);
	_leave("");
}

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = __atomic_add_unless(&call->usage, 1, 0);
	if (n == 0)
		return false;
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_read(&call->usage);
	ASSERTCMP(n, >=, 1);
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	if (call) {
		int n = atomic_read(&call->usage);

		trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);

	trace_rxrpc_call(call, op, n, here, NULL);
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_connection *conn = call->conn;
	bool put = false;
	int i;

	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

	trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
			 here, (const void *)call->flags);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	del_timer_sync(&call->timer);

	/* Make sure we don't get any more notifications */
	write_lock_bh(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	write_unlock_bh(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (conn)
		rxrpc_disconnect_call(call);

	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
		rxrpc_free_skb(call->rxtx_buffer[i],
			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
				rxrpc_skb_rx_cleaned));
		call->rxtx_buffer[i] = NULL;
	}

	_leave("");
}

/*
 * Prepare a kernel service call for retry.
 */
int rxrpc_prepare_call_for_retry(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int i;
	u8 last = 0;

	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

	trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
			 here, (const void *)call->flags);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERTCMP(call->completion, !=, RXRPC_CALL_REMOTELY_ABORTED);
	ASSERTCMP(call->completion, !=, RXRPC_CALL_LOCALLY_ABORTED);
	ASSERT(list_empty(&call->recvmsg_link));

	del_timer_sync(&call->timer);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, call->conn);

	if (call->conn)
		rxrpc_disconnect_call(call);

	if (rxrpc_is_service_call(call) ||
	    !call->tx_phase ||
	    call->tx_hard_ack != 0 ||
	    call->rx_hard_ack != 0 ||
	    call->rx_top != 0)
		return -EINVAL;

	call->state = RXRPC_CALL_UNINITIALISED;
	call->completion = RXRPC_CALL_SUCCEEDED;
	call->call_id = 0;
	call->cid = 0;
	call->cong_cwnd = 0;
	call->cong_extra = 0;
	call->cong_ssthresh = 0;
	call->cong_mode = 0;
	call->cong_dup_acks = 0;
	call->cong_cumul_acks = 0;
	call->acks_lowest_nak = 0;

	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
		last |= call->rxtx_annotations[i];
		call->rxtx_annotations[i] &= RXRPC_TX_ANNO_LAST;
		call->rxtx_annotations[i] |= RXRPC_TX_ANNO_RETRANS;
	}

	_leave(" = 0");
	return 0;
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

	while (!list_empty(&rx->to_be_accepted)) {
		call = list_entry(rx->to_be_accepted.next,
				  struct rxrpc_call, accept_link);
		list_del(&call->accept_link);
		rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_got);
		rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	_leave("");
}

/*
 * release a call
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	struct rxrpc_net *rxnet;
	const void *here = __builtin_return_address(0);
	int n;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(call, op, n, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

		if (!list_empty(&call->link)) {
			rxnet = rxrpc_net(sock_net(&call->socket->sk));
			write_lock(&rxnet->call_lock);
			list_del_init(&call->link);
			write_unlock(&rxnet->call_lock);
		}

		rxrpc_cleanup_call(call);
	}
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_put_peer(call->peer);
	kfree(call->rxtx_buffer);
	kfree(call->rxtx_annotations);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	int i;

	_net("DESTROY CALL %d", call->debug_id);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->timer);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->conn, ==, NULL);

	/* Clean up the Rx/Tx buffer */
	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++)
		rxrpc_free_skb(call->rxtx_buffer[i],
			       (call->tx_phase ? rxrpc_skb_tx_cleaned :
				rxrpc_skb_rx_cleaned));

	rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned);

	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
	struct rxrpc_call *call;

	_enter("");

	if (list_empty(&rxnet->calls))
		return;

	write_lock(&rxnet->call_lock);

	while (!list_empty(&rxnet->calls)) {
		call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);

		pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
		       call, atomic_read(&call->usage),
		       rxrpc_call_states[call->state],
		       call->flags, call->events);

		write_unlock(&rxnet->call_lock);
		cond_resched();
		write_lock(&rxnet->call_lock);
	}

	write_unlock(&rxnet->call_lock);
}