/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
				      struct rxrpc_backlog *b,
				      rxrpc_notify_rx_t notify_rx,
				      rxrpc_user_attach_call_t user_attach_call,
				      unsigned long user_call_ID, gfp_t gfp,
				      unsigned int debug_id)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	int max, tmp;
	unsigned int size = RXRPC_BACKLOG_MAX;
	unsigned int head, tail, call_head, call_tail;

	max = rx->sk.sk_max_ack_backlog;
	tmp = rx->sk.sk_ack_backlog;
	if (tmp >= max) {
		_leave(" = -ENOBUFS [full %u]", max);
		return -ENOBUFS;
	}
	max -= tmp;
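	/* max is now the remaining accept-queue headroom on this socket. */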

	/* We don't need more conns and peers than we have calls, but on the
	 * other hand, we shouldn't ever use more peers than conns or conns
	 * than calls.
	 */
	call_head = b->call_backlog_head;
	call_tail = READ_ONCE(b->call_backlog_tail);
	tmp = CIRC_CNT(call_head, call_tail, size);
	if (tmp >= max) {
		_leave(" = -ENOBUFS [enough %u]", tmp);
		return -ENOBUFS;
	}
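	/* We're about to add one call, so make sure the conn and peer rings
	 * hold at least tmp + 1 entries each.
	 */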
	max = tmp + 1;

	head = b->peer_backlog_head;
	tail = READ_ONCE(b->peer_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
		if (!peer)
			return -ENOMEM;
		b->peer_backlog[head] = peer;
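		/* Write the slot before advancing the head; this pairs with
		 * the smp_load_acquire() in rxrpc_alloc_incoming_call().
		 */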
		smp_store_release(&b->peer_backlog_head,
				  (head + 1) & (size - 1));
	}

	head = b->conn_backlog_head;
	tail = READ_ONCE(b->conn_backlog_tail);
	if (CIRC_CNT(head, tail, size) < max) {
		struct rxrpc_connection *conn;

		conn = rxrpc_prealloc_service_connection(rxnet, gfp);
		if (!conn)
			return -ENOMEM;
		b->conn_backlog[head] = conn;
		smp_store_release(&b->conn_backlog_head,
				  (head + 1) & (size - 1));

		trace_rxrpc_conn(conn, rxrpc_conn_new_service,
				 atomic_read(&conn->usage), here);
	}

	/* Now it gets complicated, because calls get registered with the
	 * socket here, particularly if a user ID is preassigned by the user.
	 */
	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return -ENOMEM;
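	/* The call starts out in the prealloc state; it only goes live when
	 * rxrpc_incoming_call() attaches it to an incoming packet.
	 */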
	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
	call->state = RXRPC_CALL_SERVER_PREALLOC;

	trace_rxrpc_call(call, rxrpc_call_new_service,
			 atomic_read(&call->usage),
			 here, (const void *)user_call_ID);

	write_lock(&rx->call_lock);
	if (user_attach_call) {
		struct rxrpc_call *xcall;
		struct rb_node *parent, **pp;

		/* Check the user ID isn't already in use */
		pp = &rx->calls.rb_node;
		parent = NULL;
		while (*pp) {
			parent = *pp;
			xcall = rb_entry(parent, struct rxrpc_call, sock_node);
			if (user_call_ID < xcall->user_call_ID)
				pp = &(*pp)->rb_left;
			else if (user_call_ID > xcall->user_call_ID)
				pp = &(*pp)->rb_right;
			else
				goto id_in_use;
		}

		call->user_call_ID = user_call_ID;
		call->notify_rx = notify_rx;
		rxrpc_get_call(call, rxrpc_call_got_kernel);
		user_attach_call(call, user_call_ID);
		rxrpc_get_call(call, rxrpc_call_got_userid);
		rb_link_node(&call->sock_node, parent, pp);
		rb_insert_color(&call->sock_node, &rx->calls);
		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}

	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	b->call_backlog[call_head] = call;
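	/* Publish the call last: once the ring head moves,
	 * rxrpc_alloc_incoming_call() may hand the call to an incoming packet.
	 */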
	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
	return 0;

id_in_use:
	write_unlock(&rx->call_lock);
	rxrpc_cleanup_call(call);
	_leave(" = -EBADSLT");
	return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * enough of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
	struct rxrpc_backlog *b = rx->backlog;

	if (!b) {
		b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
		if (!b)
			return -ENOMEM;
		rx->backlog = b;
	}

	if (rx->discard_new_call)
		return 0;

	while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp,
					  atomic_inc_return(&rxrpc_debug_id)) == 0)
		;

	return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
	unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

	if (!b)
		return;
	rx->backlog = NULL;

	/* Make sure that there aren't any incoming calls in progress before we
	 * clear the preallocation buffers.
	 */
	spin_lock_bh(&rx->incoming_lock);
	spin_unlock_bh(&rx->incoming_lock);

	head = b->peer_backlog_head;
	tail = b->peer_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_peer *peer = b->peer_backlog[tail];
		kfree(peer);
		tail = (tail + 1) & (size - 1);
	}

	head = b->conn_backlog_head;
	tail = b->conn_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_connection *conn = b->conn_backlog[tail];
		write_lock(&rxnet->conn_lock);
		list_del(&conn->link);
		list_del(&conn->proc_link);
		write_unlock(&rxnet->conn_lock);
		kfree(conn);
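		/* Drop the netns connection count and wake anyone waiting for
		 * it to reach zero.
		 */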
		if (atomic_dec_and_test(&rxnet->nr_conns))
			wake_up_atomic_t(&rxnet->nr_conns);
		tail = (tail + 1) & (size - 1);
	}

	head = b->call_backlog_head;
	tail = b->call_backlog_tail;
	while (CIRC_CNT(head, tail, size) > 0) {
		struct rxrpc_call *call = b->call_backlog[tail];
		rcu_assign_pointer(call->socket, rx);
		if (rx->discard_new_call) {
			_debug("discard %lx", call->user_call_ID);
			rx->discard_new_call(call, call->user_call_ID);
			rxrpc_put_call(call, rxrpc_call_put_kernel);
		}
		rxrpc_call_completed(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
		tail = (tail + 1) & (size - 1);
	}

	kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
						    struct rxrpc_local *local,
						    struct rxrpc_connection *conn,
						    struct sk_buff *skb)
{
	struct rxrpc_backlog *b = rx->backlog;
	struct rxrpc_peer *peer, *xpeer;
	struct rxrpc_call *call;
	unsigned short call_head, conn_head, peer_head;
	unsigned short call_tail, conn_tail, peer_tail;
	unsigned short call_count, conn_count;

	/* #calls >= #conns >= #peers must hold true. */
	call_head = smp_load_acquire(&b->call_backlog_head);
	call_tail = b->call_backlog_tail;
	call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
	conn_head = smp_load_acquire(&b->conn_backlog_head);
	conn_tail = b->conn_backlog_tail;
	conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
	ASSERTCMP(conn_count, >=, call_count);
	peer_head = smp_load_acquire(&b->peer_backlog_head);
	peer_tail = b->peer_backlog_tail;
	ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
		  conn_count);

	if (call_count == 0)
		return NULL;

	if (!conn) {
		/* No connection.  We're going to need a peer to start off
		 * with.  If one doesn't yet exist, use a spare from the
		 * preallocation set.  We dump the address into the spare in
		 * anticipation - and to save on stack space.
		 */
		xpeer = b->peer_backlog[peer_tail];
		if (rxrpc_extract_addr_from_skb(local, &xpeer->srx, skb) < 0)
			return NULL;

		peer = rxrpc_lookup_incoming_peer(local, xpeer);
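		/* If the lookup used our spare (no matching peer existed),
		 * consume its ring slot; otherwise the spare stays in place
		 * for the next call.
		 */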
		if (peer == xpeer) {
			b->peer_backlog[peer_tail] = NULL;
			smp_store_release(&b->peer_backlog_tail,
					  (peer_tail + 1) &
					  (RXRPC_BACKLOG_MAX - 1));
		}

		/* Now allocate and set up the connection */
		conn = b->conn_backlog[conn_tail];
		b->conn_backlog[conn_tail] = NULL;
		smp_store_release(&b->conn_backlog_tail,
				  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
		conn->params.local = rxrpc_get_local(local);
		conn->params.peer = peer;
		rxrpc_see_connection(conn);
		rxrpc_new_incoming_connection(rx, conn, skb);
	} else {
		rxrpc_get_connection(conn);
	}

	/* And now we can allocate and set up a new call */
	call = b->call_backlog[call_tail];
	b->call_backlog[call_tail] = NULL;
	smp_store_release(&b->call_backlog_tail,
			  (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

	rxrpc_see_call(call);
	call->conn = conn;
	call->peer = rxrpc_get_peer(conn->params.peer);
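	/* Seed the call's congestion window from the peer's last-known value. */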
	call->cong_cwnd = call->peer->cong_cwnd;
	return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 *
 * The call is returned with the user access mutex held.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
					   struct rxrpc_connection *conn,
					   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_sock *rx;
	struct rxrpc_call *call;
	u16 service_id = sp->hdr.serviceId;

	_enter("");

	/* Get the socket providing the service */
	rx = rcu_dereference(local->service);
	if (rx && (service_id == rx->srx.srx_service ||
		   service_id == rx->second_service))
		goto found_service;

	trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
	skb->priority = RX_INVALID_OPERATION;
	_leave(" = NULL [service]");
	return NULL;

found_service:
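	/* incoming_lock serialises us against rxrpc_discard_prealloc()
	 * emptying the backlog rings.
	 */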
	spin_lock(&rx->incoming_lock);
	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
	    rx->sk.sk_state == RXRPC_CLOSE) {
		trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
				  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
		skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
		skb->priority = RX_INVALID_OPERATION;
		_leave(" = NULL [close]");
		call = NULL;
		goto out;
	}

	call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
	if (!call) {
		skb->mark = RXRPC_SKB_MARK_BUSY;
		_leave(" = NULL [busy]");
		call = NULL;
		goto out;
	}

	trace_rxrpc_receive(call, rxrpc_receive_incoming,
			    sp->hdr.serial, sp->hdr.seq);

	/* Lock the call to prevent rxrpc_kernel_send/recv_data() and
	 * sendmsg()/recvmsg() inconveniently stealing the mutex once the
	 * notification is generated.
	 *
	 * The BUG should never happen because the kernel should be well
	 * behaved enough not to access the call before the first notification
	 * event and userspace is prevented from doing so until the state is
	 * appropriate.
	 */
	if (!mutex_trylock(&call->user_mutex))
		BUG();

	/* Make the call live. */
	rxrpc_incoming_call(rx, call, skb);
	conn = call->conn;

	if (rx->notify_new_call)
		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
	else
		sk_acceptq_added(&rx->sk);

	spin_lock(&conn->state_lock);
	switch (conn->state) {
	case RXRPC_CONN_SERVICE_UNSECURED:
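		/* The connection hasn't been secured yet: flag it so that the
		 * connection event processor will issue a security challenge.
		 */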
		conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
		rxrpc_queue_conn(call->conn);
		break;

	case RXRPC_CONN_SERVICE:
		write_lock(&call->state_lock);
		if (rx->discard_new_call)
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		else
			call->state = RXRPC_CALL_SERVER_ACCEPTING;
		write_unlock(&call->state_lock);
		break;

	case RXRPC_CONN_REMOTELY_ABORTED:
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  conn->remote_abort, -ECONNABORTED);
		break;
	case RXRPC_CONN_LOCALLY_ABORTED:
		rxrpc_abort_call("CON", call, sp->hdr.seq,
				 conn->local_abort, -ECONNABORTED);
		break;
	default:
		BUG();
	}
	spin_unlock(&conn->state_lock);

	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
		rxrpc_notify_socket(call);

	/* We have to discard the prealloc queue's ref here and rely on a
	 * combination of the RCU read lock and refs held either by the socket
	 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
	 * service to prevent the call from being deallocated too early.
	 */
	rxrpc_put_call(call, rxrpc_call_put);

	_leave(" = %p{%d}", call, call->debug_id);
out:
	spin_unlock(&rx->incoming_lock);
	return call;
}

/*
 * Handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 * - called with the socket locked.
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
				     unsigned long user_call_ID,
				     rxrpc_notify_rx_t notify_rx)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(call->user_mutex)
{
	struct rxrpc_call *call;
	struct rb_node *parent, **pp;
	int ret;

	_enter(",%lx", user_call_ID);

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		release_sock(&rx->sk);
		kleave(" = -ENODATA [empty]");
		return ERR_PTR(-ENODATA);
	}

	/* check the user ID isn't already in use */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto id_in_use;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	write_unlock(&rx->call_lock);

	/* We need to gain the mutex from the interrupt handler without
	 * upsetting lockdep, so we have to release it there and take it here.
	 * We are, however, still holding the socket lock, so other accepts
	 * must wait for us and no one can add the user ID behind our backs.
	 */
	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
		release_sock(&rx->sk);
		kleave(" = -ERESTARTSYS");
		return ERR_PTR(-ERESTARTSYS);
	}

	write_lock(&rx->call_lock);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	/* Find the user ID insertion point. */
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			BUG();
	}

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
		break;
	case RXRPC_CALL_COMPLETE:
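		/* The call was aborted or otherwise completed before userspace
		 * got to accept it; hand back its error instead.
		 */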
		ret = call->error;
		goto out_release;
	default:
		BUG();
	}

	/* formalise the acceptance */
	call->notify_rx = notify_rx;
	call->user_call_ID = user_call_ID;
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
		BUG();

	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_notify_socket(call);
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %p{%d}", call, call->debug_id);
	return call;

out_release:
	_debug("release %p", call);
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	rxrpc_release_call(rx, call);
	rxrpc_put_call(call, rxrpc_call_put);
	goto out;

id_in_use:
	ret = -EBADSLT;
	write_unlock(&rx->call_lock);
out:
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	bool abort = false;
	int ret;

	_enter("");

	ASSERT(!irqs_disabled());

	write_lock(&rx->call_lock);

	if (list_empty(&rx->to_be_accepted)) {
		write_unlock(&rx->call_lock);
		return -ENODATA;
	}

	/* Dequeue the first call and check it's still valid.  We gain
	 * responsibility for the queue's reference.
	 */
	call = list_entry(rx->to_be_accepted.next,
			  struct rxrpc_call, accept_link);
	list_del_init(&call->accept_link);
	sk_acceptq_removed(&rx->sk);
	rxrpc_see_call(call);

	write_lock_bh(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_SERVER_ACCEPTING:
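		/* Mark the call as aborted here; the actual ABORT packet is
		 * sent below, once the locks have been dropped.
		 */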
		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
		abort = true;
		/* fall through */
	case RXRPC_CALL_COMPLETE:
		ret = call->error;
		goto out_discard;
	default:
		BUG();
	}

out_discard:
	write_unlock_bh(&call->state_lock);
	write_unlock(&rx->call_lock);
	if (abort) {
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}
	rxrpc_service_prealloc(rx, GFP_KERNEL);
	_leave(" = %d", ret);
	return ret;
}

/*
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 * @debug_id: The tracing debug ID.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
			       rxrpc_notify_rx_t notify_rx,
			       rxrpc_user_attach_call_t user_attach_call,
			       unsigned long user_call_ID, gfp_t gfp,
			       unsigned int debug_id)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_backlog *b = rx->backlog;

	if (sock->sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	return rxrpc_service_prealloc_one(rx, b, notify_rx,
					  user_attach_call, user_call_ID,
					  gfp, debug_id);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);