/* RxRPC virtual connection handler, common bits.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

12 13
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

14
#include <linux/module.h>
15
#include <linux/slab.h>
16 17 18 19
#include <linux/net.h>
#include <linux/skbuff.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
/* Time till a connection on a closed-down service is reaped after last use
 * (in seconds) - applied by the reaper when local->service_closed is set.
 */
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;

/* RCU callback that does the final teardown (defined below). */
static void rxrpc_destroy_connection(struct rcu_head *);

/*
 * Connection expiry timer handler.  Defers the real work to the connection's
 * work item (set up to run rxrpc_process_connection) so that expiry is
 * handled in process context rather than in the timer softirq.
 */
static void rxrpc_connection_timer(struct timer_list *timer)
{
	struct rxrpc_connection *conn =
		container_of(timer, struct rxrpc_connection, timer);

	rxrpc_queue_conn(conn);
}

36 37 38
/*
 * allocate a new connection
 */
39
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
40 41 42 43 44 45 46
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
47
		INIT_LIST_HEAD(&conn->cache_link);
48
		spin_lock_init(&conn->channel_lock);
49
		INIT_LIST_HEAD(&conn->waiting_calls);
50
		timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
51
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
52
		INIT_LIST_HEAD(&conn->proc_link);
53
		INIT_LIST_HEAD(&conn->link);
54
		skb_queue_head_init(&conn->rx_queue);
55
		conn->security = &rxrpc_no_security;
56 57 58
		spin_lock_init(&conn->state_lock);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->size_align = 4;
59
		conn->idle_timestamp = jiffies;
60 61
	}

62
	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
63 64 65 66
	return conn;
}

/*
67 68 69 70 71 72
 * Look up a connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is taken.
 * NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
73
 */
74 75
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
						   struct sk_buff *skb)
76 77
{
	struct rxrpc_connection *conn;
78
	struct rxrpc_conn_proto k;
79
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
80 81
	struct sockaddr_rxrpc srx;
	struct rxrpc_peer *peer;
82

83
	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);
84

D
David Howells 已提交
85
	if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
86
		goto not_found;
87

88 89 90
	k.epoch	= sp->hdr.epoch;
	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;

91 92 93 94 95 96 97 98 99 100
	/* We may have to handle mixing IPv4 and IPv6 */
	if (srx.transport.family != local->srx.transport.family) {
		pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
				    srx.transport.family,
				    local->srx.transport.family);
		goto not_found;
	}

	k.epoch	= sp->hdr.epoch;
	k.cid	= sp->hdr.cid & RXRPC_CIDMASK;
101

102
	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
103 104 105 106 107 108 109
		/* We need to look up service connections by the full protocol
		 * parameter set.  We look up the peer first as an intermediate
		 * step and then the connection from the peer's tree.
		 */
		peer = rxrpc_lookup_peer_rcu(local, &srx);
		if (!peer)
			goto not_found;
110 111 112 113 114
		conn = rxrpc_find_service_conn_rcu(peer, skb);
		if (!conn || atomic_read(&conn->usage) == 0)
			goto not_found;
		_leave(" = %p", conn);
		return conn;
115
	} else {
116 117 118
		/* Look up client connections by connection ID alone as their
		 * IDs are unique for this machine.
		 */
119
		conn = idr_find(&rxrpc_client_conn_ids,
120 121 122 123 124 125 126
				sp->hdr.cid >> RXRPC_CIDSHIFT);
		if (!conn || atomic_read(&conn->usage) == 0) {
			_debug("no conn");
			goto not_found;
		}

		if (conn->proto.epoch != k.epoch ||
127 128 129 130 131 132 133 134 135 136 137 138
		    conn->params.local != local)
			goto not_found;

		peer = conn->params.peer;
		switch (srx.transport.family) {
		case AF_INET:
			if (peer->srx.transport.sin.sin_port !=
			    srx.transport.sin.sin_port ||
			    peer->srx.transport.sin.sin_addr.s_addr !=
			    srx.transport.sin.sin_addr.s_addr)
				goto not_found;
			break;
139
#ifdef CONFIG_AF_RXRPC_IPV6
D
David Howells 已提交
140 141 142 143 144 145 146 147
		case AF_INET6:
			if (peer->srx.transport.sin6.sin6_port !=
			    srx.transport.sin6.sin6_port ||
			    memcmp(&peer->srx.transport.sin6.sin6_addr,
				   &srx.transport.sin6.sin6_addr,
				   sizeof(struct in6_addr)) != 0)
				goto not_found;
			break;
148
#endif
149 150 151 152 153 154
		default:
			BUG();
		}

		_leave(" = %p", conn);
		return conn;
155 156
	}

157
not_found:
158 159 160 161
	_leave(" = NULL");
	return NULL;
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	/* The low bits of the CID select which of the connection's channels
	 * the call occupies.
	 */
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		trace_rxrpc_disconnect_call(call);
		switch (call->completion) {
		case RXRPC_CALL_SUCCEEDED:
			chan->last_seq = call->rx_hard_ack;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
			break;
		case RXRPC_CALL_LOCALLY_ABORTED:
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		default:
			/* Any other completion is recorded as a user abort. */
			chan->last_abort = RX_USER_ABORT;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		}

		/* Sync with rxrpc_conn_retransmit(). */
		smp_wmb();
		/* The last_* fields must be visible before call_id moves on,
		 * hence the barrier above; do not reorder these stores.
		 */
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
	}

	_leave("");
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	/* Copy the call's congestion window back to the peer record —
	 * presumably to seed the next call to that peer; confirm against
	 * the call-setup path.
	 */
	call->peer->cong_cwnd = call->cong_cwnd;

	/* Remove the call from the peer's error-distribution list. */
	spin_lock_bh(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock_bh(&conn->params.peer->lock);

	/* Client calls have their own disconnection path. */
	if (rxrpc_is_client_call(call))
		return rxrpc_disconnect_client_call(call);

	spin_lock(&conn->channel_lock);
	__rxrpc_disconnect_call(conn, call);
	spin_unlock(&conn->channel_lock);

	call->conn = NULL;
	conn->idle_timestamp = jiffies;	/* restart the idle-expiry clock */
	rxrpc_put_connection(conn);	/* drop the call's ref on the conn */
}

/*
 * Kill off a connection.  All four channels must be unoccupied and the
 * connection must be off the idle-cache list before this is called.
 */
void rxrpc_kill_connection(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;

	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
	ASSERT(list_empty(&conn->cache_link));

	/* Unpublish from the proc list first. */
	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

	/* Drain the Rx queue.  Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	/* Leave final destruction to RCU.  The connection processor work item
	 * must carry a ref on the connection to prevent us getting here whilst
	 * it is queued or running.
	 */
	call_rcu(&conn->rcu, rxrpc_destroy_connection);
}

262
/*
263 264
 * Queue a connection's work processor, getting a ref to pass to the work
 * queue.
265
 */
266
bool rxrpc_queue_conn(struct rxrpc_connection *conn)
267
{
268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320
	const void *here = __builtin_return_address(0);
	int n = __atomic_add_unless(&conn->usage, 1, 0);
	if (n == 0)
		return false;
	if (rxrpc_queue_work(&conn->processor))
		trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
	else
		rxrpc_put_connection(conn);
	return true;
}

/*
 * Note the re-emergence of a connection (tracing only; no ref is taken).
 */
void rxrpc_see_connection(struct rxrpc_connection *conn)
{
	const void *where = __builtin_return_address(0);

	if (!conn)
		return;

	trace_rxrpc_conn(conn, rxrpc_conn_seen, atomic_read(&conn->usage),
			 where);
}

/*
 * Get a ref on a connection, tracing the new usage count.
 */
void rxrpc_get_connection(struct rxrpc_connection *conn)
{
	const void *where = __builtin_return_address(0);

	trace_rxrpc_conn(conn, rxrpc_conn_got,
			 atomic_inc_return(&conn->usage), where);
}

/*
 * Try to get a ref on a connection.  Returns the connection on success or
 * NULL if it was NULL or already dead (usage count at zero).
 */
struct rxrpc_connection *
rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
{
	const void *where = __builtin_return_address(0);
	int usage;

	if (!conn)
		return NULL;

	/* Only take a ref if the count hasn't already dropped to zero. */
	usage = __atomic_add_unless(&conn->usage, 1, 0);
	if (usage == 0)
		return NULL;

	trace_rxrpc_conn(conn, rxrpc_conn_got, usage + 1, where);
	return conn;
}

D
David Howells 已提交
321 322 323 324 325 326 327 328 329 330
/*
 * Set the service connection reap timer.
 */
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at)
{
	if (rxnet->live)
		timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
}

331 332 333 334 335 336 337 338 339 340 341
/*
 * Release a service connection
 */
void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_dec_return(&conn->usage);
	trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
	ASSERTCMP(n, >=, 0);
D
David Howells 已提交
342 343 344
	if (n == 1)
		rxrpc_set_service_reap_timer(conn->params.local->rxnet,
					     jiffies + rxrpc_connection_expiry);
345 346 347 348 349
}

/*
 * destroy a virtual connection
 *
 * RCU callback scheduled by rxrpc_kill_connection() once the grace period
 * has elapsed; the usage count must be zero by now.
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	del_timer_sync(&conn->timer);
	/* Drain again: packets may have been queued after the purge in
	 * rxrpc_kill_connection() but before the grace period ended.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);

	/* Wake anyone waiting (in rxrpc_destroy_all_connections()) for the
	 * last connection to go away.
	 */
	if (atomic_dec_and_test(&conn->params.local->rxnet->nr_conns))
		wake_up_atomic_t(&conn->params.local->rxnet->nr_conns);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}

/*
 * reap dead service connections
 *
 * Walks the service-connection list, moving expired connections (usage
 * count at 1, i.e. only the list's ref remains) to a local graveyard and
 * then killing them outside the lock.
 */
void rxrpc_service_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, service_conn_reaper);
	unsigned long expire_at, earliest, idle_timestamp, now;

	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	earliest = now + MAX_JIFFY_OFFSET;

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		ASSERTCMP(atomic_read(&conn->usage), >, 0);
		/* Still in use by someone other than the list itself. */
		if (likely(atomic_read(&conn->usage) > 1))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		/* If the namespace is being torn down, expire everything
		 * immediately; otherwise honour the idle-expiry deadline.
		 */
		if (rxnet->live) {
			idle_timestamp = READ_ONCE(conn->idle_timestamp);
			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
			if (conn->params.local->service_closed)
				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;

			_debug("reap CONN %d { u=%d,t=%ld }",
			       conn->debug_id, atomic_read(&conn->usage),
			       (long)expire_at - (long)now);

			if (time_before(now, expire_at)) {
				if (time_before(expire_at, earliest))
					earliest = expire_at;
				continue;
			}
		}

		/* The usage count sits at 1 whilst the object is unused on the
		 * list; we reduce that to 0 to make the object unavailable.
		 */
		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
			continue;
		trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, NULL);

		/* Client connections must never appear on this list. */
		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxnet->conn_lock);

	/* Rearm the timer for the next connection due to expire, if any. */
	if (earliest != now + MAX_JIFFY_OFFSET) {
		_debug("reschedule reaper %ld", (long)earliest - (long)now);
		ASSERT(time_after(earliest, now));
		rxrpc_set_service_reap_timer(rxnet, earliest);
	}

	/* Now kill off the graveyard entries without holding conn_lock. */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_kill_connection(conn);
	}

	_leave("");
}

/*
 * preemptively destroy all the service connection records rather than
 * waiting for them to time out
 */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	/* NOTE(review): presumably offsets an initial nr_conns count of 1
	 * set at namespace init — confirm against the rxnet setup code. */
	atomic_dec(&rxnet->nr_conns);
	rxrpc_destroy_all_client_connections(rxnet);

	/* Cancel the reap timer, then force one final reap pass and wait
	 * for it to finish. */
	del_timer_sync(&rxnet->service_conn_reap_timer);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	flush_workqueue(rxrpc_workqueue);

	/* Anything still on the service list after the final reap has been
	 * leaked. */
	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	/* We need to wait for the connections to be destroyed by RCU as they
	 * pin things that we still need to get rid of.
	 */
	wait_on_atomic_t(&rxnet->nr_conns, atomic_t_wait, TASK_UNINTERRUPTIBLE);
	_leave("");
}