/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

/*
 * allocate a new connection
 */
static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		spin_lock_init(&conn->channel_lock);
		init_waitqueue_head(&conn->channel_wq);
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->link);
		skb_queue_head_init(&conn->rx_queue);
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
		atomic_set(&conn->usage, 1);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		atomic_set(&conn->avail_chans, RXRPC_MAXCALLS);
		conn->size_align = 4;
		conn->header_size = sizeof(struct rxrpc_wire_header);
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}
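
/* A freshly allocated connection holds one reference for the allocator and
 * has all RXRPC_MAXCALLS channels available; avail_chans is decremented as
 * calls claim channels and incremented again as calls disconnect (see
 * rxrpc_connect_call() and __rxrpc_disconnect_call() below).
 */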

/*
 * Allocate a client connection.  The caller must take care to clear any
 * padding bytes in *cp.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	conn->params		= *cp;
	conn->proto.local	= cp->local;
	conn->proto.epoch	= rxrpc_epoch;
	conn->proto.cid		= 0;
	conn->proto.in_clientflag = 0;
	conn->proto.family	= cp->peer->srx.transport.family;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->state		= RXRPC_CONN_CLIENT;

	switch (conn->proto.family) {
	case AF_INET:
		conn->proto.addr_size = sizeof(conn->proto.ipv4_addr);
		conn->proto.ipv4_addr = cp->peer->srx.transport.sin.sin_addr;
		conn->proto.port = cp->peer->srx.transport.sin.sin_port;
		break;
	}

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	write_lock(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock(&rxrpc_connection_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
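
/* Note that the error labels in rxrpc_alloc_client_connection() unwind in
 * reverse order of construction, so a failure at any step releases only
 * what had already been set up.
 */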

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	int chan;

	DECLARE_WAITQUEUE(myself, current);

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
	if (!cp->peer)
		return -ENOMEM;

	if (!cp->exclusive) {
		/* Search for an existing client connection unless this is going
		 * to be a connection that's used exclusively for a single call.
		 */
		_debug("search 1");
		spin_lock(&local->client_conns_lock);
		p = local->client_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)cp->X)
			diff = (cmp(peer) ?:
				cmp(key) ?:
				cmp(security_level));
			if (diff < 0)
				p = p->rb_left;
			else if (diff > 0)
				p = p->rb_right;
			else
				goto found_extant_conn;
		}
		spin_unlock(&local->client_conns_lock);
	}

	/* We didn't find a connection or we want an exclusive one. */
	_debug("get new conn");
	candidate = rxrpc_alloc_client_connection(cp, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return PTR_ERR(candidate);
	}

	if (cp->exclusive) {
		/* Assign the call on an exclusive connection to channel 0 and
		 * don't add the connection to the endpoint's shareable conn
		 * lookup tree.
		 */
		_debug("exclusive chan 0");
		conn = candidate;
		atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
		spin_lock(&conn->channel_lock);
		chan = 0;
		goto found_channel;
	}

	/* We need to redo the search before attempting to add a new connection
	 * lest we race with someone else adding a conflicting instance.
	 */
	_debug("search 2");
	spin_lock(&local->client_conns_lock);

	pp = &local->client_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		conn = rb_entry(parent, struct rxrpc_connection, client_node);

		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level));
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}

	/* The second search also failed; simply add the new connection with
	 * the new call in channel 0.  Note that we need to take the channel
	 * lock before dropping the client conn lock.
	 */
	_debug("new conn");
	conn = candidate;
	candidate = NULL;
	rb_link_node(&conn->client_node, parent, pp);
	rb_insert_color(&conn->client_node, &local->client_conns);

	atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
	spin_lock(&conn->channel_lock);
	spin_unlock(&local->client_conns_lock);
	chan = 0;

found_channel:
	_debug("found chan");
	call->conn	= conn;
	call->channel	= chan;
	call->epoch	= conn->proto.epoch;
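	/* The bottom RXRPC_CIDSHIFT bits of a client cid carry the channel
	 * number; conn->proto.cid must have those bits clear for the OR
	 * below to work (the idr lookup in rxrpc_find_connection() strips
	 * them off again with cid >> RXRPC_CIDSHIFT).
	 */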
	call->cid	= conn->proto.cid | chan;
	call->call_id	= ++conn->channels[chan].call_counter;
	conn->channels[chan].call_id = call->call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);

	_net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id);

	spin_unlock(&conn->channel_lock);
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return 0;

	/* We found a suitable connection already in existence.  Discard any
	 * candidate we may have allocated, and try to get a channel on this
	 * one.
	 */
found_extant_conn:
	_debug("found conn");
	rxrpc_get_connection(conn);
	spin_unlock(&local->client_conns_lock);

	rxrpc_put_connection(candidate);

	if (!atomic_add_unless(&conn->avail_chans, -1, 0)) {
		if (!gfpflags_allow_blocking(gfp)) {
			rxrpc_put_connection(conn);
			_leave(" = -EAGAIN");
			return -EAGAIN;
		}
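
		/* No free channel, but we may block: sleep on the connection's
		 * waitqueue until __rxrpc_disconnect_call() returns a channel
		 * and bumps avail_chans, rechecking the counter each time we
		 * are woken.
		 */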
		add_wait_queue(&conn->channel_wq, &myself);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (atomic_add_unless(&conn->avail_chans, -1, 0))
				break;
			if (signal_pending(current))
				goto interrupted;
			schedule();
		}
		remove_wait_queue(&conn->channel_wq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* The connection allegedly now has a free channel and we can now
	 * attach the call to it.
	 */
	spin_lock(&conn->channel_lock);

	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan].call)
			goto found_channel;
	BUG();

interrupted:
	remove_wait_queue(&conn->channel_wq, &myself);
	__set_current_state(TASK_RUNNING);
	rxrpc_put_connection(conn);
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
	_leave(" = -ERESTARTSYS");
	return -ERESTARTSYS;
}

/*
 * get a record of an incoming connection
 */
struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *local,
						   struct rxrpc_peer *peer,
						   struct sk_buff *skb)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p, **pp;
	const char *new = "old";
	u32 epoch, cid;

	_enter("");

	ASSERT(sp->hdr.flags & RXRPC_CLIENT_INITIATED);

	epoch = sp->hdr.epoch;
	cid = sp->hdr.cid & RXRPC_CIDMASK;

	/* search the connection list first */
	read_lock_bh(&peer->conn_lock);

	p = peer->service_conns.rb_node;
	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, service_node);

		_debug("maybe %x", conn->proto.cid);

		if (epoch < conn->proto.epoch)
			p = p->rb_left;
		else if (epoch > conn->proto.epoch)
			p = p->rb_right;
		else if (cid < conn->proto.cid)
			p = p->rb_left;
		else if (cid > conn->proto.cid)
			p = p->rb_right;
		else
			goto found_extant_connection;
	}
	read_unlock_bh(&peer->conn_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_connection(GFP_NOIO);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	/* Fill in the candidate from the packet header.  Service connections
	 * are keyed in the peer's service_conns rb-tree by { epoch, cid }.
	 */
	candidate->proto.local		= local;
	candidate->proto.epoch		= sp->hdr.epoch;
	candidate->proto.cid		= sp->hdr.cid & RXRPC_CIDMASK;
	candidate->proto.in_clientflag	= RXRPC_CLIENT_INITIATED;
	candidate->params.local		= local;
	candidate->params.peer		= peer;
	candidate->params.service_id	= sp->hdr.serviceId;
	candidate->security_ix		= sp->hdr.securityIndex;
	candidate->out_clientflag	= 0;
	candidate->state		= RXRPC_CONN_SERVICE;
	if (candidate->params.service_id)
		candidate->state	= RXRPC_CONN_SERVICE_UNSECURED;

	write_lock_bh(&peer->conn_lock);

	pp = &peer->service_conns.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		conn = rb_entry(p, struct rxrpc_connection, service_node);

		if (epoch < conn->proto.epoch)
			pp = &(*pp)->rb_left;
		else if (epoch > conn->proto.epoch)
			pp = &(*pp)->rb_right;
		else if (cid < conn->proto.cid)
			pp = &(*pp)->rb_left;
		else if (cid > conn->proto.cid)
			pp = &(*pp)->rb_right;
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	conn = candidate;
	candidate = NULL;
	rb_link_node(&conn->service_node, p, pp);
	rb_insert_color(&conn->service_node, &peer->service_conns);
	rxrpc_get_peer(peer);
	rxrpc_get_local(local);

	write_unlock_bh(&peer->conn_lock);

	write_lock(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock(&rxrpc_connection_lock);

	new = "new";

success:
	_net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->proto.cid);

	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return conn;

	/* we found the connection in the list immediately */
found_extant_connection:
	if (sp->hdr.securityIndex != conn->security_ix) {
		read_unlock_bh(&peer->conn_lock);
		goto security_mismatch;
	}
	rxrpc_get_connection(conn);
	read_unlock_bh(&peer->conn_lock);
	goto success;

	/* we found the connection on the second time through the list */
found_extant_second:
	if (sp->hdr.securityIndex != conn->security_ix) {
		write_unlock_bh(&peer->conn_lock);
		goto security_mismatch;
	}
	rxrpc_get_connection(conn);
	write_unlock_bh(&peer->conn_lock);
	kfree(candidate);
	goto success;

security_mismatch:
	kfree(candidate);
	_leave(" = -EKEYREJECTED");
	return ERR_PTR(-EKEYREJECTED);
}

/*
 * find a connection based on transport and RxRPC connection ID for an incoming
 * packet
 */
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_local *local,
					       struct rxrpc_peer *peer,
					       struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	u32 epoch, cid;

	_enter(",{%x,%x}", sp->hdr.cid, sp->hdr.flags);

	read_lock_bh(&peer->conn_lock);

	cid	= sp->hdr.cid & RXRPC_CIDMASK;
	epoch	= sp->hdr.epoch;

	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
		p = peer->service_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			_debug("maybe %x", conn->proto.cid);

			if (epoch < conn->proto.epoch)
				p = p->rb_left;
			else if (epoch > conn->proto.epoch)
				p = p->rb_right;
			else if (cid < conn->proto.cid)
				p = p->rb_left;
			else if (cid > conn->proto.cid)
				p = p->rb_right;
			else
				goto found;
		}
	} else {
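		/* The packet wasn't client-initiated, so it must be directed
		 * at one of our client connections; their IDs come from the
		 * rxrpc_client_conn_ids idr, so shift off the channel bits to
		 * recover the idr index.
		 */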
		conn = idr_find(&rxrpc_client_conn_ids, cid >> RXRPC_CIDSHIFT);
		if (conn &&
		    conn->proto.epoch == epoch &&
		    conn->params.peer == peer)
			goto found;
	}

	read_unlock_bh(&peer->conn_lock);
	_leave(" = NULL");
	return NULL;

found:
	rxrpc_get_connection(conn);
	read_unlock_bh(&peer->conn_lock);
	_leave(" = %p", conn);
	return conn;
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[call->channel];

	_enter("%d,%d", conn->debug_id, call->channel);

	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		chan->last_result = call->local_abort;
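		/* The write barrier below publishes last_result before the
		 * advance of last_call, so a reader that observes the new
		 * last_call also observes the matching result (assuming a
		 * paired read barrier on the reader's side).
		 */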
		smp_wmb();
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
		atomic_inc(&conn->avail_chans);
		wake_up(&conn->channel_wq);
	}

	_leave("");
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	spin_lock(&conn->channel_lock);
	__rxrpc_disconnect_call(call);
	spin_unlock(&conn->channel_lock);

	call->conn = NULL;
	rxrpc_put_connection(conn);
}

/*
 * release a virtual connection
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (!conn)
		return;

	_enter("%p{u=%d,d=%d}",
	       conn, atomic_read(&conn->usage), conn->debug_id);

	ASSERTCMP(atomic_read(&conn->usage), >, 0);

	conn->put_time = ktime_get_seconds();
	if (atomic_dec_and_test(&conn->usage)) {
		_debug("zombie");
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	}

	_leave("");
}
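
/* Note: put_time is stamped on every put, but only the final ref drop kicks
 * the reaper; the reaper rechecks usage under the appropriate lock before
 * moving a connection to the graveyard.
 */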

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}
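
/* rxrpc_destroy_connection() runs as an RCU callback, a grace period after
 * the reaper has unlinked the connection, so anything still dereferencing
 * the connection under RCU should have finished before the memory is freed.
 */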

/*
 * reap dead connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_peer *peer;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = ktime_get_seconds();
	earliest = ULONG_MAX;

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long) now - (long) conn->put_time);

		if (likely(atomic_read(&conn->usage) > 0))
			continue;

		if (rxrpc_conn_is_client(conn)) {
			struct rxrpc_local *local = conn->params.local;
			spin_lock(&local->client_conns_lock);
			reap_time = conn->put_time + rxrpc_connection_expiry;

			if (atomic_read(&conn->usage) > 0) {
				;
			} else if (reap_time <= now) {
				list_move_tail(&conn->link, &graveyard);
				rxrpc_put_client_connection_id(conn);
				rb_erase(&conn->client_node,
					 &local->client_conns);
			} else if (reap_time < earliest) {
				earliest = reap_time;
			}

			spin_unlock(&local->client_conns_lock);
		} else {
			peer = conn->params.peer;
			write_lock_bh(&peer->conn_lock);
			reap_time = conn->put_time + rxrpc_connection_expiry;

			if (atomic_read(&conn->usage) > 0) {
				;
			} else if (reap_time <= now) {
				list_move_tail(&conn->link, &graveyard);
				rb_erase(&conn->service_node,
					 &peer->service_conns);
			} else if (reap_time < earliest) {
				earliest = reap_time;
			}

			write_unlock_bh(&peer->conn_lock);
		}
	}
	write_unlock(&rxrpc_connection_lock);
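
	/* If nothing was ripe yet, rearm the reaper to fire at the earliest
	 * expiry time seen; expiry times are in seconds, hence the * HZ
	 * conversion below.
	 */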
	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
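
		/* The final free is deferred through call_rcu() because the
		 * channel call pointers are RCU-managed; a grace period must
		 * elapse before the connection's memory can be recycled.
		 */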
		skb_queue_purge(&conn->rx_queue);
		call_rcu(&conn->rcu, rxrpc_destroy_connection);
	}

	_leave("");
}

/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	/* Zeroing the expiry time makes every unused connection immediately
	 * reapable, so one final reaper run flushes them all out.
	 */
	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxrpc_connection_lock);
	BUG_ON(leak);

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	_leave("");
}