/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

/*
 * allocate a new connection
 */
static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
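		/* The new connection starts with one reference (held for the
		 * caller) and all RXRPC_MAXCALLS channels available.
		 */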
		spin_lock_init(&conn->channel_lock);
		init_waitqueue_head(&conn->channel_wq);
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->link);
		conn->calls = RB_ROOT;
		skb_queue_head_init(&conn->rx_queue);
		conn->security = &rxrpc_no_security;
		rwlock_init(&conn->lock);
		spin_lock_init(&conn->state_lock);
		atomic_set(&conn->usage, 1);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		atomic_set(&conn->avail_chans, RXRPC_MAXCALLS);
		conn->size_align = 4;
		conn->header_size = sizeof(struct rxrpc_wire_header);
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * add a call to a connection's call-by-ID tree
 */
static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
				      struct rxrpc_call *call)
{
	struct rxrpc_call *xcall;
	struct rb_node *parent, **p;
	u32 call_id;

	write_lock_bh(&conn->lock);

	call_id = call->call_id;
	p = &conn->calls.rb_node;
	parent = NULL;
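	/* Descend the tree, which is keyed by call ID; two calls on the same
	 * connection must never share an ID, hence the BUG() on a match.
	 */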
	while (*p) {
		parent = *p;
		xcall = rb_entry(parent, struct rxrpc_call, conn_node);

		if (call_id < xcall->call_id)
			p = &(*p)->rb_left;
		else if (call_id > xcall->call_id)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);

	write_unlock_bh(&conn->lock);
}

/*
 * Allocate a client connection.  The caller must take care to clear any
 * padding bytes in *cp.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp,
			      struct rxrpc_transport *trans,
			      gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	conn->params		= *cp;
	conn->proto.local	= cp->local;
	conn->proto.epoch	= rxrpc_epoch;
	conn->proto.cid		= 0;
	conn->proto.in_clientflag = 0;
	conn->proto.family	= cp->peer->srx.transport.family;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->state		= RXRPC_CONN_CLIENT;

	switch (conn->proto.family) {
	case AF_INET:
		conn->proto.addr_size = sizeof(conn->proto.ipv4_addr);
		conn->proto.ipv4_addr = cp->peer->srx.transport.sin.sin_addr;
		conn->proto.port = cp->peer->srx.transport.sin.sin_port;
		break;
	}

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	conn->security->prime_packet_security(conn);

	write_lock(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock(&rxrpc_connection_lock);

	key_get(conn->params.key);
	conn->trans = trans;
	atomic_inc(&trans->usage);

	_leave(" = %p", conn);
	return conn;

error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct rxrpc_transport *trans,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	int chan;

	DECLARE_WAITQUEUE(myself, current);

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = trans->peer;
	rxrpc_get_peer(cp->peer);

	if (!cp->exclusive) {
		/* Search for an existing client connection unless this is going
		 * to be a connection that's used exclusively for a single call.
		 */
		_debug("search 1");
		spin_lock(&local->client_conns_lock);
		p = local->client_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)cp->X)
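			/* Shareable connections are keyed on peer, then key,
			 * then security level; the first non-zero difference
			 * picks the branch to descend.
			 */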
			diff = (cmp(peer) ?:
				cmp(key) ?:
				cmp(security_level));
			if (diff < 0)
				p = p->rb_left;
			else if (diff > 0)
				p = p->rb_right;
			else
				goto found_extant_conn;
		}
		spin_unlock(&local->client_conns_lock);
	}

	/* We didn't find a connection or we want an exclusive one. */
	_debug("get new conn");
	candidate = rxrpc_alloc_client_connection(cp, trans, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return PTR_ERR(candidate);
	}

	if (cp->exclusive) {
		/* Assign the call on an exclusive connection to channel 0 and
		 * don't add the connection to the endpoint's shareable conn
		 * lookup tree.
		 */
		_debug("exclusive chan 0");
		conn = candidate;
		atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
		spin_lock(&conn->channel_lock);
		chan = 0;
		goto found_channel;
	}

	/* We need to redo the search before attempting to add a new connection
	 * lest we race with someone else adding a conflicting instance.
	 */
	_debug("search 2");
	spin_lock(&local->client_conns_lock);

	pp = &local->client_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		conn = rb_entry(parent, struct rxrpc_connection, client_node);

		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level));
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}

	/* The second search also failed; simply add the new connection with
	 * the new call in channel 0.  Note that we need to take the channel
	 * lock before dropping the client conn lock.
	 */
	_debug("new conn");
	conn = candidate;
	candidate = NULL;

	rb_link_node(&conn->client_node, parent, pp);
	rb_insert_color(&conn->client_node, &local->client_conns);

	atomic_set(&conn->avail_chans, RXRPC_MAXCALLS - 1);
	spin_lock(&conn->channel_lock);
	spin_unlock(&local->client_conns_lock);
	chan = 0;

found_channel:
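	/* Attach the call to the channel slot: the channel number occupies
	 * the bottom bits of the call's CID.
	 */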
	_debug("found chan");
	call->conn	= conn;
	call->channel	= chan;
	call->epoch	= conn->proto.epoch;
	call->cid	= conn->proto.cid | chan;
	call->call_id	= ++conn->call_counter;
	rcu_assign_pointer(conn->channels[chan], call);

	_net("CONNECT call %d on conn %d", call->debug_id, conn->debug_id);

	rxrpc_add_call_ID_to_conn(conn, call);
	spin_unlock(&conn->channel_lock);
	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return 0;

	/* We found a suitable connection already in existence.  Discard any
	 * candidate we may have allocated, and try to get a channel on this
	 * one.
	 */
found_extant_conn:
	_debug("found conn");
	rxrpc_get_connection(conn);
	spin_unlock(&local->client_conns_lock);

	rxrpc_put_connection(candidate);

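	/* atomic_add_unless() only claims a channel if avail_chans is
	 * non-zero, so a failure here means all the channels are currently in
	 * use and we may have to wait for one to be freed.
	 */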
	if (!atomic_add_unless(&conn->avail_chans, -1, 0)) {
		if (!gfpflags_allow_blocking(gfp)) {
			rxrpc_put_connection(conn);
			_leave(" = -EAGAIN");
			return -EAGAIN;
		}

		add_wait_queue(&conn->channel_wq, &myself);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (atomic_add_unless(&conn->avail_chans, -1, 0))
				break;
			if (signal_pending(current))
				goto interrupted;
			schedule();
		}
		remove_wait_queue(&conn->channel_wq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* The connection allegedly now has a free channel and we can now
	 * attach the call to it.
	 */
	spin_lock(&conn->channel_lock);

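	/* We reserved a channel by decrementing avail_chans, so at least one
	 * of the slots below must be unoccupied.
	 */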
	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan])
			goto found_channel;
	BUG();

interrupted:
	remove_wait_queue(&conn->channel_wq, &myself);
	__set_current_state(TASK_RUNNING);
	rxrpc_put_connection(conn);
	_leave(" = -ERESTARTSYS");
	return -ERESTARTSYS;
}

/*
 * get a record of an incoming connection
 */
struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_transport *trans,
						   struct sk_buff *skb)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p, **pp;
	const char *new = "old";
	u32 epoch;
	u32 cid;

	_enter("");

	ASSERT(sp->hdr.flags & RXRPC_CLIENT_INITIATED);

	epoch = sp->hdr.epoch;
	cid = sp->hdr.cid & RXRPC_CIDMASK;

	/* search the connection list first */
	read_lock_bh(&trans->conn_lock);

	p = trans->server_conns.rb_node;
	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, service_node);

		_debug("maybe %x", conn->proto.cid);

		if (epoch < conn->proto.epoch)
			p = p->rb_left;
		else if (epoch > conn->proto.epoch)
			p = p->rb_right;
		else if (cid < conn->proto.cid)
			p = p->rb_left;
		else if (cid > conn->proto.cid)
			p = p->rb_right;
		else
			goto found_extant_connection;
	}
	read_unlock_bh(&trans->conn_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_connection(GFP_NOIO);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->trans		= trans;
	candidate->proto.local		= trans->local;
	candidate->proto.epoch		= sp->hdr.epoch;
	candidate->proto.cid		= sp->hdr.cid & RXRPC_CIDMASK;
	candidate->proto.in_clientflag	= RXRPC_CLIENT_INITIATED;
	candidate->params.local		= trans->local;
	candidate->params.peer		= trans->peer;
	candidate->params.service_id	= sp->hdr.serviceId;
	candidate->security_ix		= sp->hdr.securityIndex;
	candidate->out_clientflag	= 0;
	candidate->state		= RXRPC_CONN_SERVER;
	if (candidate->params.service_id)
		candidate->state	= RXRPC_CONN_SERVER_UNSECURED;

	write_lock_bh(&trans->conn_lock);

	pp = &trans->server_conns.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		conn = rb_entry(p, struct rxrpc_connection, service_node);

		if (epoch < conn->proto.epoch)
			pp = &(*pp)->rb_left;
		else if (epoch > conn->proto.epoch)
			pp = &(*pp)->rb_right;
		else if (cid < conn->proto.cid)
			pp = &(*pp)->rb_left;
		else if (cid > conn->proto.cid)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	conn = candidate;
	candidate = NULL;
	rb_link_node(&conn->service_node, p, pp);
	rb_insert_color(&conn->service_node, &trans->server_conns);
	atomic_inc(&conn->trans->usage);

	write_unlock_bh(&trans->conn_lock);

	write_lock(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock(&rxrpc_connection_lock);

	new = "new";

success:
	_net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->proto.cid);

	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return conn;

	/* we found the connection in the list immediately */
found_extant_connection:
	if (sp->hdr.securityIndex != conn->security_ix) {
		read_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	rxrpc_get_connection(conn);
	read_unlock_bh(&trans->conn_lock);
	goto success;

	/* we found the connection on the second time through the list */
found_extant_second:
	if (sp->hdr.securityIndex != conn->security_ix) {
		write_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	rxrpc_get_connection(conn);
	write_unlock_bh(&trans->conn_lock);
	kfree(candidate);
	goto success;

security_mismatch:
	kfree(candidate);
	_leave(" = -EKEYREJECTED");
	return ERR_PTR(-EKEYREJECTED);
}

/*
 * find a connection based on transport and RxRPC connection ID for an incoming
 * packet
 */
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
					       struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	u32 epoch, cid;

	_enter(",{%x,%x}", sp->hdr.cid, sp->hdr.flags);

	read_lock_bh(&trans->conn_lock);

	cid	= sp->hdr.cid & RXRPC_CIDMASK;
	epoch	= sp->hdr.epoch;

	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
		p = trans->server_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			_debug("maybe %x", conn->proto.cid);

			if (epoch < conn->proto.epoch)
				p = p->rb_left;
			else if (epoch > conn->proto.epoch)
				p = p->rb_right;
			else if (cid < conn->proto.cid)
				p = p->rb_left;
			else if (cid > conn->proto.cid)
				p = p->rb_right;
			else
				goto found;
		}
	} else {
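		/* Client connection IDs are allocated from the
		 * rxrpc_client_conn_ids IDR, so look the connection up there
		 * rather than in a per-transport tree.
		 */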
		conn = idr_find(&rxrpc_client_conn_ids, cid >> RXRPC_CIDSHIFT);
		if (conn && conn->proto.epoch == epoch)
			goto found;
	}

	read_unlock_bh(&trans->conn_lock);
	_leave(" = NULL");
	return NULL;

found:
	rxrpc_get_connection(conn);
	read_unlock_bh(&trans->conn_lock);
	_leave(" = %p", conn);
	return conn;
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	unsigned int chan = call->channel;

	_enter("%d,%d", conn->debug_id, call->channel);

	if (conn->channels[chan] == call) {
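		/* Release the channel and wake anyone in rxrpc_connect_call()
		 * who is waiting for a free channel on this connection.
		 */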
		rcu_assign_pointer(conn->channels[chan], NULL);
		atomic_inc(&conn->avail_chans);
		wake_up(&conn->channel_wq);
	}
}

/*
 * release a virtual connection
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	if (!conn)
		return;

	_enter("%p{u=%d,d=%d}",
	       conn, atomic_read(&conn->usage), conn->debug_id);

	ASSERTCMP(atomic_read(&conn->usage), >, 0);

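	/* Note when the connection was last put; the reaper won't consider
	 * expiring it until rxrpc_connection_expiry seconds after this.
	 */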
	conn->put_time = ktime_get_seconds();
	if (atomic_dec_and_test(&conn->usage)) {
		_debug("zombie");
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	}

	_leave("");
}

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
{
	_enter("%p{%d}", conn, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	ASSERT(RB_EMPTY_ROOT(&conn->calls));
	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);

	rxrpc_put_transport(conn->trans);
	kfree(conn);
	_leave("");
}

/*
 * reap dead connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = ktime_get_seconds();
	earliest = ULONG_MAX;

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long) now - (long) conn->put_time);

		if (likely(atomic_read(&conn->usage) > 0))
			continue;

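		/* The connection looks unused, but the usage count must be
		 * rechecked below, under the appropriate tree lock, in case
		 * it has just been resurrected.
		 */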
		if (rxrpc_conn_is_client(conn)) {
			struct rxrpc_local *local = conn->params.local;
			spin_lock(&local->client_conns_lock);
			reap_time = conn->put_time + rxrpc_connection_expiry;

			if (atomic_read(&conn->usage) > 0) {
				;
			} else if (reap_time <= now) {
				list_move_tail(&conn->link, &graveyard);
				rxrpc_put_client_connection_id(conn);
				rb_erase(&conn->client_node,
					 &local->client_conns);
			} else if (reap_time < earliest) {
				earliest = reap_time;
			}

			spin_unlock(&local->client_conns_lock);
		} else {
			write_lock_bh(&conn->trans->conn_lock);
			reap_time = conn->put_time + rxrpc_connection_expiry;

			if (atomic_read(&conn->usage) > 0) {
				;
			} else if (reap_time <= now) {
				list_move_tail(&conn->link, &graveyard);
				rb_erase(&conn->service_node,
					 &conn->trans->server_conns);
			} else if (reap_time < earliest) {
				earliest = reap_time;
			}

			write_unlock_bh(&conn->trans->conn_lock);
		}
	}
	write_unlock(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_destroy_connection(conn);
	}

	_leave("");
}

/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
	_enter("");

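	/* Set the expiry time to zero so that every unused connection is
	 * immediately reapable, then run the reaper once to sweep them up.
	 */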
	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);

	_leave("");
}