/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

/*
 * allocate a new connection
 */
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		spin_lock_init(&conn->channel_lock);
		init_waitqueue_head(&conn->channel_wq);
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->link);
		skb_queue_head_init(&conn->rx_queue);
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
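		/* The caller gets the initial usage reference. */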
		atomic_set(&conn->usage, 1);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		atomic_set(&conn->avail_chans, RXRPC_MAXCALLS);
		conn->size_align = 4;
		conn->header_size = sizeof(struct rxrpc_wire_header);
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * find a connection based on transport and RxRPC connection ID for an incoming
 * packet
 */
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_local *local,
					       struct rxrpc_peer *peer,
					       struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	u32 epoch, cid;

	_enter(",{%x,%x}", sp->hdr.cid, sp->hdr.flags);

	read_lock_bh(&peer->conn_lock);

	cid	= sp->hdr.cid & RXRPC_CIDMASK;
	epoch	= sp->hdr.epoch;

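	/* Client-initiated packets index one of our service connections,
	 * which live in an rb-tree on the peer, keyed by epoch and
	 * connection ID; otherwise the ID picks out one of our own client
	 * connections from the IDR.
	 */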
	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
		p = peer->service_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			_debug("maybe %x", conn->proto.cid);

			if (epoch < conn->proto.epoch)
				p = p->rb_left;
			else if (epoch > conn->proto.epoch)
				p = p->rb_right;
			else if (cid < conn->proto.cid)
				p = p->rb_left;
			else if (cid > conn->proto.cid)
				p = p->rb_right;
			else
				goto found;
		}
	} else {
		conn = idr_find(&rxrpc_client_conn_ids, cid >> RXRPC_CIDSHIFT);
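		/* The IDR is keyed on the connection ID alone, so the epoch
		 * and the peer still have to be checked against the packet.
		 */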
		if (conn &&
		    conn->proto.epoch == epoch &&
		    conn->params.peer == peer)
			goto found;
	}

	read_unlock_bh(&peer->conn_lock);
	_leave(" = NULL");
	return NULL;

found:
	rxrpc_get_connection(conn);
	read_unlock_bh(&peer->conn_lock);
	_leave(" = %p", conn);
	return conn;
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[call->channel];

	_enter("%d,%d", conn->debug_id, call->channel);

	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		chan->last_result = call->local_abort;
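		/* Order the saved result before the update of last_call so
		 * that a reader seeing the new last_call also sees the result
		 * that goes with it.
		 */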
		smp_wmb();
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

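		/* Release the channel and wake up anyone waiting for one. */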
		rcu_assign_pointer(chan->call, NULL);
		atomic_inc(&conn->avail_chans);
		wake_up(&conn->channel_wq);
	}

	_leave("");
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	spin_lock(&conn->channel_lock);
	__rxrpc_disconnect_call(call);
	spin_unlock(&conn->channel_lock);

	call->conn = NULL;
	rxrpc_put_connection(conn);
}

/*
 * release a virtual connection
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (!conn)
		return;

	_enter("%p{u=%d,d=%d}",
	       conn, atomic_read(&conn->usage), conn->debug_id);

	ASSERTCMP(atomic_read(&conn->usage), >, 0);

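	/* Note when the connection went idle; the reaper compares this
	 * against rxrpc_connection_expiry to decide when to reap.
	 */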
	conn->put_time = ktime_get_seconds();
	if (atomic_dec_and_test(&conn->usage)) {
		_debug("zombie");
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	}

	_leave("");
}

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}

/*
 * reap dead connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_peer *peer;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = ktime_get_seconds();
	earliest = ULONG_MAX;

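	/* Sweep the connection list, moving expired connections onto a local
	 * graveyard list; they are destroyed once all locks are dropped.
	 */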
	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long) now - (long) conn->put_time);

		if (likely(atomic_read(&conn->usage) > 0))
			continue;

		if (rxrpc_conn_is_client(conn)) {
			struct rxrpc_local *local = conn->params.local;

			spin_lock(&local->client_conns_lock);
			reap_time = conn->put_time + rxrpc_connection_expiry;

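			/* Recheck the usage count under the lock as the
			 * connection may have gained a new user since the
			 * unlocked check above.
			 */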
			if (atomic_read(&conn->usage) > 0) {
				;
			} else if (reap_time <= now) {
				list_move_tail(&conn->link, &graveyard);
				rxrpc_put_client_connection_id(conn);
				rb_erase(&conn->client_node,
					 &local->client_conns);
			} else if (reap_time < earliest) {
				earliest = reap_time;
			}

			spin_unlock(&local->client_conns_lock);
		} else {
			peer = conn->params.peer;
			write_lock_bh(&peer->conn_lock);
			reap_time = conn->put_time + rxrpc_connection_expiry;

			if (atomic_read(&conn->usage) > 0) {
				;
			} else if (reap_time <= now) {
				list_move_tail(&conn->link, &graveyard);
				rb_erase(&conn->service_node,
					 &peer->service_conns);
			} else if (reap_time < earliest) {
				earliest = reap_time;
			}

			write_unlock_bh(&peer->conn_lock);
		}
	}
	write_unlock(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
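		/* Actual freeing is deferred to after an RCU grace period. */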
		skb_queue_purge(&conn->rx_queue);
		call_rcu(&conn->rcu, rxrpc_destroy_connection);
	}

	_leave("");
}

/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

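	/* Zero the expiry period and kick the reaper so that every remaining
	 * connection is treated as already expired.
	 */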
	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxrpc_connection_lock);
	BUG_ON(leak);

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	_leave("");
}