/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);

static DEFINE_MUTEX(rxrpc_local_mutex);
static LIST_HEAD(rxrpc_local_endpoints);

/*
 * Compare a local to an address.  Return -ve, 0 or +ve to indicate less than,
 * same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services.  Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
				const struct sockaddr_rxrpc *srx)
{
	long diff;

	diff = ((local->srx.transport_type - srx->transport_type) ?:
		(local->srx.transport_len - srx->transport_len) ?:
		(local->srx.transport.family - srx->transport.family));
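	/* The ?: chain above picks out the first non-zero difference, so the
	 * resulting order is by transport type, then length, then family.
	 */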
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		/* If the choice of UDP port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&local->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		/* If the choice of UDP6 port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin6.sin6_port -
			(u16 __force)srx->transport.sin6.sin6_port) ?:
			memcmp(&local->srx.transport.sin6.sin6_addr,
			       &srx->transport.sin6.sin6_addr,
			       sizeof(struct in6_addr));
#endif
	default:
		BUG();
	}
}

/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
		atomic_set(&local->usage, 1);
		INIT_LIST_HEAD(&local->link);
		INIT_WORK(&local->processor, rxrpc_local_processor);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->reject_queue);
		skb_queue_head_init(&local->event_queue);
		local->client_conns = RB_ROOT;
		spin_lock_init(&local->client_conns_lock);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
	}

	_leave(" = %p", local);
	return local;
}

/*
 * create the local socket
 * - must be called with rxrpc_local_mutex locked
 */
static int rxrpc_open_socket(struct rxrpc_local *local)
{
	struct sock *sock;
	int ret, opt;

	_enter("%p{%d,%d}",
	       local, local->srx.transport_type, local->srx.transport.family);

	/* create a socket to represent the local endpoint */
	ret = sock_create_kern(&init_net, local->srx.transport.family,
			       local->srx.transport_type, 0, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* if a local address was supplied then bind it */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *)&local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed %d", ret);
			goto error;
		}
	}

	/* we want to receive ICMP errors */
	opt = 1;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* we want to set the don't fragment bit */
	opt = IP_PMTUDISC_DO;
	ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
				(char *) &opt, sizeof(opt));
	if (ret < 0) {
		_debug("setsockopt failed");
		goto error;
	}

	/* set the socket up */
	sock = local->socket->sk;
	sock->sk_user_data	= local;
	sock->sk_data_ready	= rxrpc_data_ready;
	sock->sk_error_report	= rxrpc_error_report;
	_leave(" = 0");
	return 0;

error:
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}

/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct list_head *cursor;
	const char *age;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxrpc_local_mutex);

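	/* The endpoint list is kept sorted in rxrpc_local_cmp_key() order, so
	 * the walk can stop as soon as it has passed the insertion point.
	 */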
	for (cursor = rxrpc_local_endpoints.next;
	     cursor != &rxrpc_local_endpoints;
	     cursor = cursor->next) {
		local = list_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff < 0)
			continue;
		if (diff > 0)
			break;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here.  It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match.  We replace a dying object.  Attempting to
		 * bind the transport socket may still fail if we're attempting
		 * to use a local address that the dying object is still using.
		 */
		if (!rxrpc_get_local_maybe(local)) {
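			/* The old endpoint's usage count has already reached
			 * zero, so it is on its way out.  Step past it and
			 * unlink it; the new endpoint is inserted in its place
			 * below.
			 */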
			cursor = cursor->next;
			list_del_init(&local->link);
			break;
		}

		age = "old";
		goto found;
	}

	local = rxrpc_alloc_local(srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local);
	if (ret < 0)
		goto sock_error;

	list_add_tail(&local->link, cursor);
	age = "new";

found:
	mutex_unlock(&rxrpc_local_mutex);

	_net("LOCAL %s %d {%pISp}",
	     age, local->debug_id, &local->srx.transport);

	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxrpc_local_mutex);
	kfree(local);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxrpc_local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}
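
/* Usage sketch (illustrative only, not taken from this file): a caller such
 * as the bind path might consume the result roughly like this, where "rx"
 * stands for the caller's own socket state:
 *
 *	local = rxrpc_lookup_local(&rx->srx);
 *	if (IS_ERR(local))
 *		return PTR_ERR(local);
 *	rx->local = local;
 */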

/*
 * A local endpoint reached its end of life.
 */
void __rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%d", local->debug_id);
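	/* Teardown is deferred to the work item: rxrpc_local_processor() will
	 * see the zero usage count and call rxrpc_local_destroyer(), which is
	 * allowed to sleep while closing the socket.
	 */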
	rxrpc_queue_work(&local->processor);
}

/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;

	_enter("%d", local->debug_id);

	/* We can get a race between an incoming call packet queueing the
	 * processor again and the work processor starting the destruction
	 * process which will shut down the UDP socket.
	 */
	if (local->dead) {
		_leave(" [already dead]");
		return;
	}
	local->dead = true;

	mutex_lock(&rxrpc_local_mutex);
	list_del_init(&local->link);
	mutex_unlock(&rxrpc_local_mutex);

	ASSERT(RB_EMPTY_ROOT(&local->client_conns));
	ASSERT(!local->service);

	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);

	_debug("rcu local %d", local->debug_id);
	call_rcu(&local->rcu, rxrpc_local_rcu);
}

/*
 * Process events on an endpoint
 */
static void rxrpc_local_processor(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, processor);
	bool again;

	_enter("%d", local->debug_id);

	do {
		again = false;
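		/* A usage count of zero means the final reference has been
		 * put; tear the endpoint down instead of processing events.
		 */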
		if (atomic_read(&local->usage) == 0)
			return rxrpc_local_destroyer(local);

		if (!skb_queue_empty(&local->reject_queue)) {
			rxrpc_reject_packets(local);
			again = true;
		}

		if (!skb_queue_empty(&local->event_queue)) {
			rxrpc_process_local_events(local);
			again = true;
		}
	} while (again);
}

/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}

/*
 * Verify the local endpoint list is empty by this point.
 */
void __exit rxrpc_destroy_all_locals(void)
{
	struct rxrpc_local *local;

	_enter("");

	flush_workqueue(rxrpc_workqueue);

	if (!list_empty(&rxrpc_local_endpoints)) {
		mutex_lock(&rxrpc_local_mutex);
		list_for_each_entry(local, &rxrpc_local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, atomic_read(&local->usage));
		}
		mutex_unlock(&rxrpc_local_mutex);
		BUG();
	}

	rcu_barrier();
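	/* rcu_barrier() above waits for any rxrpc_local_rcu() callbacks still
	 * pending from call_rcu() to run before the module can be unloaded.
	 */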
}