connection.c 25.3 KB
Newer Older
A
Andy Grover 已提交
1
/*
K
Ka-Cheong Poon 已提交
2
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
A
Andy Grover 已提交
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/list.h>
35
#include <linux/slab.h>
36
#include <linux/export.h>
37 38
#include <net/ipv6.h>
#include <net/inet6_hashtables.h>
K
Ka-Cheong Poon 已提交
39
#include <net/addrconf.h>
A
Andy Grover 已提交
40 41 42 43 44 45 46 47 48 49 50 51 52 53

#include "rds.h"
#include "loop.h"

#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
#define RDS_CONNECTION_HASH_MASK (RDS_CONNECTION_HASH_ENTRIES - 1)

/* converting this to RCU is a chore for another day.. */
static DEFINE_SPINLOCK(rds_conn_lock);
static unsigned long rds_conn_count;
static struct hlist_head rds_conn_hash[RDS_CONNECTION_HASH_ENTRIES];
static struct kmem_cache *rds_conn_slab;

54 55
static struct hlist_head *rds_conn_bucket(const struct in6_addr *laddr,
					  const struct in6_addr *faddr)
A
Andy Grover 已提交
56
{
57
	static u32 rds6_hash_secret __read_mostly;
58 59
	static u32 rds_hash_secret __read_mostly;

60
	u32 lhash, fhash, hash;
61 62

	net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));
63 64 65 66 67
	net_get_random_once(&rds6_hash_secret, sizeof(rds6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, rds6_hash_secret);
	hash = __inet6_ehashfn(lhash, 0, fhash, 0, rds_hash_secret);
68

A
Andy Grover 已提交
69 70 71 72 73 74 75 76
	return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
}

/* OR RDS_INFO_CONNECTION_FLAG_<suffix> into @var when @test is true. */
#define rds_conn_info_set(var, test, suffix) do {		\
	if (test)						\
		var |= RDS_INFO_CONNECTION_FLAG_##suffix;	\
} while (0)

77
/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct net *net,
					      struct hlist_head *head,
					      const struct in6_addr *laddr,
					      const struct in6_addr *faddr,
					      const struct in6_addr *laddr,
					      struct rds_transport *trans,
					      int dev_if)
{
	struct rds_connection *conn, *ret = NULL;

	/* A connection is uniquely identified by the address pair,
	 * transport, network namespace and interface index; every
	 * component must match.
	 */
	hlist_for_each_entry_rcu(conn, head, c_hash_node) {
		if (ipv6_addr_equal(&conn->c_faddr, faddr) &&
		    ipv6_addr_equal(&conn->c_laddr, laddr) &&
		    conn->c_trans == trans &&
		    net == rds_conn_net(conn) &&
		    conn->c_dev_if == dev_if) {
			ret = conn;
			break;
		}
	}
	rdsdebug("returning conn %p for %pI6c -> %pI6c\n", ret,
		 laddr, faddr);
	return ret;
}

/*
 * This is called by transports as they're bringing down a connection.
 * It clears partial message state so that the transport can start sending
 * and receiving over this connection again in the future.  It is up to
 * the transport to have serialized this call with its send and recv.
 */
static void rds_conn_path_reset(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	rdsdebug("connection %pI6c to %pI6c reset\n",
		 &conn->c_laddr, &conn->c_faddr);

	rds_stats_inc(s_conn_reset);
	/* Drop/requeue pending sends for this path, then clear all
	 * path flags (RDS_IN_XMIT, RDS_RECV_REFILL, ...).
	 */
	rds_send_path_reset(cp);
	cp->cp_flags = 0;

	/* Do not clear next_rx_seq here, else we cannot distinguish
	 * retransmitted packets from new packets, and will hand all
	 * of them to the application. That is not consistent with the
	 * reliability guarantees of RDS. */
}

125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145
/* Initialize one path of @conn: lock, sequence counter, queues, work
 * items and connection state.  Called from __rds_conn_create() before
 * the connection is published on the hash.
 * NOTE(review): @is_outgoing is currently unused in this function.
 */
static void __rds_conn_path_init(struct rds_connection *conn,
				 struct rds_conn_path *cp, bool is_outgoing)
{
	spin_lock_init(&cp->cp_lock);
	cp->cp_next_tx_seq = 1;
	init_waitqueue_head(&cp->cp_waitq);
	INIT_LIST_HEAD(&cp->cp_send_queue);
	INIT_LIST_HEAD(&cp->cp_retrans);

	cp->cp_conn = conn;
	atomic_set(&cp->cp_state, RDS_CONN_DOWN);
	cp->cp_send_gen = 0;
	cp->cp_reconnect_jiffies = 0;
	INIT_DELAYED_WORK(&cp->cp_send_w, rds_send_worker);
	INIT_DELAYED_WORK(&cp->cp_recv_w, rds_recv_worker);
	INIT_DELAYED_WORK(&cp->cp_conn_w, rds_connect_worker);
	INIT_WORK(&cp->cp_down_w, rds_shutdown_worker);
	mutex_init(&cp->cp_cm_lock);
	cp->cp_flags = 0;
}

A
Andy Grover 已提交
146 147 148 149 150 151 152 153
/*
 * There is only every one 'conn' for a given pair of addresses in the
 * system at a time.  They contain messages to be retransmitted and so
 * span the lifetime of the actual underlying transport connections.
 *
 * For now they are not garbage collected once they're created.  They
 * are torn down as the module is removed, if ever.
 */
static struct rds_connection *__rds_conn_create(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						gfp_t gfp,
						int is_outgoing,
						int dev_if)
{
	struct rds_connection *conn, *parent = NULL;
	struct hlist_head *head = rds_conn_bucket(laddr, faddr);
	struct rds_transport *loop_trans;
	unsigned long flags;
	int ret, i;
	int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);

	/* Fast path: the conn may already exist. */
	rcu_read_lock();
	conn = rds_conn_lookup(net, head, laddr, faddr, trans, dev_if);
	if (conn &&
	    conn->c_loopback &&
	    conn->c_trans != &rds_loop_transport &&
	    ipv6_addr_equal(laddr, faddr) &&
	    !is_outgoing) {
		/* This is a looped back IB connection, and we're
		 * called by the code handling the incoming connect.
		 * We need a second connection object into which we
		 * can stick the other QP. */
		parent = conn;
		conn = parent->c_passive;
	}
	rcu_read_unlock();
	if (conn)
		goto out;

	conn = kmem_cache_zalloc(rds_conn_slab, gfp);
	if (!conn) {
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}
	conn->c_path = kcalloc(npaths, sizeof(struct rds_conn_path), gfp);
	if (!conn->c_path) {
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(-ENOMEM);
		goto out;
	}

	INIT_HLIST_NODE(&conn->c_hash_node);
	conn->c_laddr = *laddr;
	conn->c_isv6 = !ipv6_addr_v4mapped(laddr);
	conn->c_faddr = *faddr;
	conn->c_dev_if = dev_if;

	/* If the local address is link local, set c_bound_if to be the
	 * index used for this connection.  Otherwise, set it to 0 as
	 * the socket is not bound to an interface.  c_bound_if is used
	 * to look up a socket when a packet is received
	 */
	if (ipv6_addr_type(laddr) & IPV6_ADDR_LINKLOCAL)
		conn->c_bound_if = dev_if;
	else
		conn->c_bound_if = 0;

	rds_conn_net_set(conn, net);

	ret = rds_cong_get_maps(conn);
	if (ret) {
		kfree(conn->c_path);
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	/*
	 * This is where a connection becomes loopback.  If *any* RDS sockets
	 * can bind to the destination address then we'd rather the messages
	 * flow through loopback rather than either transport.
	 */
	loop_trans = rds_trans_get_preferred(net, faddr, conn->c_dev_if);
	if (loop_trans) {
		rds_trans_put(loop_trans);
		conn->c_loopback = 1;
		if (is_outgoing && trans->t_prefer_loopback) {
			/* "outgoing" connection - and the transport
			 * says it wants the connection handled by the
			 * loopback transport. This is what TCP does.
			 */
			trans = &rds_loop_transport;
		}
	}

	conn->c_trans = trans;

	init_waitqueue_head(&conn->c_hs_waitq);
	for (i = 0; i < npaths; i++) {
		__rds_conn_path_init(conn, &conn->c_path[i],
				     is_outgoing);
		conn->c_path[i].cp_index = i;
	}
	/* Hold rcu across conn_alloc so a concurrent netns teardown
	 * (rds_destroy_pending) is observed before we commit resources.
	 */
	rcu_read_lock();
	if (rds_destroy_pending(conn))
		ret = -ENETDOWN;
	else
		ret = trans->conn_alloc(conn, GFP_ATOMIC);
	if (ret) {
		rcu_read_unlock();
		kfree(conn->c_path);
		kmem_cache_free(rds_conn_slab, conn);
		conn = ERR_PTR(ret);
		goto out;
	}

	rdsdebug("allocated conn %p for %pI6c -> %pI6c over %s %s\n",
		 conn, laddr, faddr,
		 strnlen(trans->t_name, sizeof(trans->t_name)) ?
		 trans->t_name : "[unknown]", is_outgoing ? "(outgoing)" : "");

	/*
	 * Since we ran without holding the conn lock, someone could
	 * have created the same conn (either normal or passive) in the
	 * interim. We check while holding the lock. If we won, we complete
	 * init and return our conn. If we lost, we rollback and return the
	 * other one.
	 */
	spin_lock_irqsave(&rds_conn_lock, flags);
	if (parent) {
		/* Creating passive conn */
		if (parent->c_passive) {
			/* Lost the race: free our conn and use the winner's. */
			trans->conn_free(conn->c_path[0].cp_transport_data);
			kfree(conn->c_path);
			kmem_cache_free(rds_conn_slab, conn);
			conn = parent->c_passive;
		} else {
			parent->c_passive = conn;
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	} else {
		/* Creating normal conn */
		struct rds_connection *found;

		found = rds_conn_lookup(net, head, laddr, faddr, trans,
					dev_if);
		if (found) {
			struct rds_conn_path *cp;
			int i;

			for (i = 0; i < npaths; i++) {
				cp = &conn->c_path[i];
				/* The ->conn_alloc invocation may have
				 * allocated resource for all paths, so all
				 * of them may have to be freed here.
				 */
				if (cp->cp_transport_data)
					trans->conn_free(cp->cp_transport_data);
			}
			kfree(conn->c_path);
			kmem_cache_free(rds_conn_slab, conn);
			conn = found;
		} else {
			conn->c_my_gen_num = rds_gen_num;
			conn->c_peer_gen_num = 0;
			/* Publish: visible to RCU lookups from here on. */
			hlist_add_head_rcu(&conn->c_hash_node, head);
			rds_cong_add_conn(conn);
			rds_conn_count++;
		}
	}
	spin_unlock_irqrestore(&rds_conn_lock, flags);
	rcu_read_unlock();

out:
	return conn;
}

325
/* Find or create the connection for (laddr, faddr, trans) in @net.
 * Non-outgoing variant (is_outgoing = 0); see __rds_conn_create().
 * Returns the conn or an ERR_PTR on allocation/transport failure.
 */
struct rds_connection *rds_conn_create(struct net *net,
				       const struct in6_addr *laddr,
				       const struct in6_addr *faddr,
				       struct rds_transport *trans, gfp_t gfp,
				       int dev_if)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 0, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create);
A
Andy Grover 已提交
334

335
/* Like rds_conn_create() but marks the conn as actively initiated
 * (is_outgoing = 1), which allows loopback-preferring transports to be
 * swapped for rds_loop_transport in __rds_conn_create().
 */
struct rds_connection *rds_conn_create_outgoing(struct net *net,
						const struct in6_addr *laddr,
						const struct in6_addr *faddr,
						struct rds_transport *trans,
						gfp_t gfp, int dev_if)
{
	return __rds_conn_create(net, laddr, faddr, trans, gfp, 1, dev_if);
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
A
Andy Grover 已提交
344

345
/* Tear down one connection path and, if the conn is still hashed,
 * queue a reconnect.  Runs from the path's cp_down_w work item
 * (rds_shutdown_worker).
 */
void rds_conn_shutdown(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;

	/* shut it down unless it's down already */
	if (!rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
		/*
		 * Quiesce the connection mgmt handlers before we start tearing
		 * things down. We don't hold the mutex for the entire
		 * duration of the shutdown operation, else we may be
		 * deadlocking with the CM handler. Instead, the CM event
		 * handler is supposed to check for state DISCONNECTING
		 */
		mutex_lock(&cp->cp_cm_lock);
		if (!rds_conn_path_transition(cp, RDS_CONN_UP,
					      RDS_CONN_DISCONNECTING) &&
		    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
					      RDS_CONN_DISCONNECTING)) {
			rds_conn_path_error(cp,
					    "shutdown called in state %d\n",
					    atomic_read(&cp->cp_state));
			mutex_unlock(&cp->cp_cm_lock);
			return;
		}
		mutex_unlock(&cp->cp_cm_lock);

		/* Wait for in-flight transmit and receive-refill work to
		 * drain before asking the transport to shut the path down.
		 */
		wait_event(cp->cp_waitq,
			   !test_bit(RDS_IN_XMIT, &cp->cp_flags));
		wait_event(cp->cp_waitq,
			   !test_bit(RDS_RECV_REFILL, &cp->cp_flags));

		conn->c_trans->conn_path_shutdown(cp);
		rds_conn_path_reset(cp);

		if (!rds_conn_path_transition(cp, RDS_CONN_DISCONNECTING,
					      RDS_CONN_DOWN) &&
		    !rds_conn_path_transition(cp, RDS_CONN_ERROR,
					      RDS_CONN_DOWN)) {
			/* This can happen - eg when we're in the middle of tearing
			 * down the connection, and someone unloads the rds module.
			 * Quite reproducible with loopback connections.
			 * Mostly harmless.
			 *
			 * Note that this also happens with rds-tcp because
			 * we could have triggered rds_conn_path_drop in irq
			 * mode from rds_tcp_state change on the receipt of
			 * a FIN, thus we need to recheck for RDS_CONN_ERROR
			 * here.
			 */
			rds_conn_path_error(cp, "%s: failed to transition "
					    "to state DOWN, current state "
					    "is %d\n", __func__,
					    atomic_read(&cp->cp_state));
			return;
		}
	}

	/* Then reconnect if it's still live.
	 * The passive side of an IB loopback connection is never added
	 * to the conn hash, so we never trigger a reconnect on this
	 * conn - the reconnect is always triggered by the active peer. */
	cancel_delayed_work_sync(&cp->cp_conn_w);
	rcu_read_lock();
	if (!hlist_unhashed(&conn->c_hash_node)) {
		rcu_read_unlock();
		rds_queue_reconnect(cp);
	} else {
		rcu_read_unlock();
	}
}

416 417 418 419 420 421 422
/* destroy a single rds_conn_path. rds_conn_destroy() iterates over
 * all paths using rds_conn_path_destroy()
 */
static void rds_conn_path_destroy(struct rds_conn_path *cp)
{
	struct rds_message *rm, *rtmp;

	/* A path with no transport data was never brought up by the
	 * transport; nothing to tear down.
	 */
	if (!cp->cp_transport_data)
		return;

	/* make sure lingering queued work won't try to ref the conn */
	cancel_delayed_work_sync(&cp->cp_send_w);
	cancel_delayed_work_sync(&cp->cp_recv_w);

	/* destroy=true bypasses the rds_destroy_pending() check so the
	 * shutdown work is always queued; wait for it to finish.
	 */
	rds_conn_path_drop(cp, true);
	flush_work(&cp->cp_down_w);

	/* tear down queued messages */
	list_for_each_entry_safe(rm, rtmp,
				 &cp->cp_send_queue,
				 m_conn_item) {
		list_del_init(&rm->m_conn_item);
		BUG_ON(!list_empty(&rm->m_sock_item));
		rds_message_put(rm);
	}
	if (cp->cp_xmit_rm)
		rds_message_put(cp->cp_xmit_rm);

	WARN_ON(delayed_work_pending(&cp->cp_send_w));
	WARN_ON(delayed_work_pending(&cp->cp_recv_w));
	WARN_ON(delayed_work_pending(&cp->cp_conn_w));
	WARN_ON(work_pending(&cp->cp_down_w));

	cp->cp_conn->c_trans->conn_free(cp->cp_transport_data);
}

452 453
/*
 * Stop and free a connection.
454 455 456 457
 *
 * This can only be used in very limited circumstances.  It assumes that once
 * the conn has been shutdown that no one else is referencing the connection.
 * We can only ensure this in the rmmod path in the current code.
458
 */
A
Andy Grover 已提交
459 460
void rds_conn_destroy(struct rds_connection *conn)
{
461
	unsigned long flags;
462 463
	int i;
	struct rds_conn_path *cp;
464
	int npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1);
A
Andy Grover 已提交
465 466 467 468 469

	rdsdebug("freeing conn %p for %pI4 -> "
		 "%pI4\n", conn, &conn->c_laddr,
		 &conn->c_faddr);

470 471
	/* Ensure conn will not be scheduled for reconnect */
	spin_lock_irq(&rds_conn_lock);
472
	hlist_del_init_rcu(&conn->c_hash_node);
473
	spin_unlock_irq(&rds_conn_lock);
474 475
	synchronize_rcu();

476
	/* shut the connection down */
477
	for (i = 0; i < npaths; i++) {
478 479 480
		cp = &conn->c_path[i];
		rds_conn_path_destroy(cp);
		BUG_ON(!list_empty(&cp->cp_retrans));
A
Andy Grover 已提交
481 482 483 484 485 486 487 488 489
	}

	/*
	 * The congestion maps aren't freed up here.  They're
	 * freed by rds_cong_exit() after all the connections
	 * have been freed.
	 */
	rds_cong_remove_conn(conn);

490
	kfree(conn->c_path);
A
Andy Grover 已提交
491 492
	kmem_cache_free(rds_conn_slab, conn);

493
	spin_lock_irqsave(&rds_conn_lock, flags);
A
Andy Grover 已提交
494
	rds_conn_count--;
495
	spin_unlock_irqrestore(&rds_conn_lock, flags);
A
Andy Grover 已提交
496
}
A
Andy Grover 已提交
497
EXPORT_SYMBOL_GPL(rds_conn_destroy);
A
Andy Grover 已提交
498

K
Ka-Cheong Poon 已提交
499 500
/* Copy one incoming-message record to the info iterator, dispatching
 * to the IPv4 or IPv6 copy routine based on @isv6.
 */
static void __rds_inc_msg_cp(struct rds_incoming *inc,
			     struct rds_info_iterator *iter,
			     void *saddr, void *daddr, int flip, bool isv6)
{
	if (!isv6) {
		/* IPv4: only the low 32 bits matter, passed by value. */
		rds_inc_info_copy(inc, iter, *(__be32 *)saddr,
				  *(__be32 *)daddr, flip);
		return;
	}
	rds6_inc_info_copy(inc, iter, saddr, daddr, flip);
}

/* Walk every conn/path and report either its send queue or its retrans
 * queue (@want_send) through the info iterator.  @isv6 selects the
 * rds6_info_message vs rds_info_message record format and also hides
 * IPv6 conns from the v4-format dump.
 */
static void rds_conn_message_info_cmn(struct socket *sock, unsigned int len,
				      struct rds_info_iterator *iter,
				      struct rds_info_lengths *lens,
				      int want_send, bool isv6)
{
	struct hlist_head *head;
	struct list_head *list;
	struct rds_connection *conn;
	struct rds_message *rm;
	unsigned int total = 0;
	unsigned long flags;
	size_t i;
	int j;

	/* Convert the byte budget into a record count. */
	if (isv6)
		len /= sizeof(struct rds6_info_message);
	else
		len /= sizeof(struct rds_info_message);

	rcu_read_lock();

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;
			int npaths;

			/* IPv6 conns cannot be represented in the v4 format. */
			if (!isv6 && conn->c_isv6)
				continue;

			npaths = (conn->c_trans->t_mp_capable ?
				 RDS_MPATH_WORKERS : 1);

			for (j = 0; j < npaths; j++) {
				cp = &conn->c_path[j];
				if (want_send)
					list = &cp->cp_send_queue;
				else
					list = &cp->cp_retrans;

				spin_lock_irqsave(&cp->cp_lock, flags);

				/* XXX too lazy to maintain counts.. */
				list_for_each_entry(rm, list, m_conn_item) {
					total++;
					if (total <= len)
						__rds_inc_msg_cp(&rm->m_inc,
								 iter,
								 &conn->c_laddr,
								 &conn->c_faddr,
								 0, isv6);
				}

				spin_unlock_irqrestore(&cp->cp_lock, flags);
			}
		}
	}
	rcu_read_unlock();

	/* total counts everything, even what didn't fit, so the caller
	 * can resize its buffer and retry.
	 */
	lens->nr = total;
	if (isv6)
		lens->each = sizeof(struct rds6_info_message);
	else
		lens->each = sizeof(struct rds_info_message);
}

K
Ka-Cheong Poon 已提交
576 577 578 579 580
/* IPv4-format message dump; thin wrapper over the common walker. */
static void rds_conn_message_info(struct socket *sock, unsigned int len,
				  struct rds_info_iterator *iter,
				  struct rds_info_lengths *lens,
				  int want_send)
{
	rds_conn_message_info_cmn(sock, len, iter, lens, want_send, false);
}

/* IPv6-format message dump; thin wrapper over the common walker. */
static void rds6_conn_message_info(struct socket *sock, unsigned int len,
				   struct rds_info_iterator *iter,
				   struct rds_info_lengths *lens,
				   int want_send)
{
	rds_conn_message_info_cmn(sock, len, iter, lens, want_send, true);
}

A
Andy Grover 已提交
592 593 594 595 596 597 598
/* RDS_INFO_SEND_MESSAGES handler: dump the per-path send queues. */
static void rds_conn_message_info_send(struct socket *sock, unsigned int len,
				       struct rds_info_iterator *iter,
				       struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 1);
}

599 600 601 602 603 604 605
/* RDS6_INFO_SEND_MESSAGES handler: dump the per-path send queues. */
static void rds6_conn_message_info_send(struct socket *sock, unsigned int len,
					struct rds_info_iterator *iter,
					struct rds_info_lengths *lens)
{
	rds6_conn_message_info(sock, len, iter, lens, 1);
}

A
Andy Grover 已提交
606 607 608 609 610 611 612 613
/* RDS_INFO_RETRANS_MESSAGES handler: dump the per-path retrans queues. */
static void rds_conn_message_info_retrans(struct socket *sock,
					  unsigned int len,
					  struct rds_info_iterator *iter,
					  struct rds_info_lengths *lens)
{
	rds_conn_message_info(sock, len, iter, lens, 0);
}

614 615 616 617 618 619 620 621
/* RDS6_INFO_RETRANS_MESSAGES handler: dump the per-path retrans queues. */
static void rds6_conn_message_info_retrans(struct socket *sock,
					   unsigned int len,
					   struct rds_info_iterator *iter,
					   struct rds_info_lengths *lens)
{
	rds6_conn_message_info(sock, len, iter, lens, 0);
}

A
Andy Grover 已提交
622 623 624 625
/* Generic per-connection info walker: call @visitor on every hashed
 * conn, copying @item_len bytes from @buffer to the iterator for each
 * conn the visitor accepts (non-zero return).  lens->nr counts every
 * accepted conn, even those beyond the caller's buffer.
 */
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens,
			  int (*visitor)(struct rds_connection *, void *),
			  u64 *buffer,
			  size_t item_len)
{
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {

			/* XXX no c_lock usage.. */
			if (!visitor(conn, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer. */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
A
Andy Grover 已提交
659

660 661 662 663
/* Like rds_for_each_conn_info() but the visitor receives a conn *path*.
 * Only path 0 of each conn is visited (see the MPRDS note below).
 */
static void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
				    struct rds_info_iterator *iter,
				    struct rds_info_lengths *lens,
				    int (*visitor)(struct rds_conn_path *, void *),
				    u64 *buffer,
				    size_t item_len)
{
	struct hlist_head *head;
	struct rds_connection *conn;
	size_t i;

	rcu_read_lock();

	lens->nr = 0;
	lens->each = item_len;

	for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
	     i++, head++) {
		hlist_for_each_entry_rcu(conn, head, c_hash_node) {
			struct rds_conn_path *cp;

			/* XXX We only copy the information from the first
			 * path for now.  The problem is that if there are
			 * more than one underlying paths, we cannot report
			 * information of all of them using the existing
			 * API.  For example, there is only one next_tx_seq,
			 * which path's next_tx_seq should we report?  It is
			 * a bug in the design of MPRDS.
			 */
			cp = conn->c_path;

			/* XXX no cp_lock usage.. */
			if (!visitor(cp, buffer))
				continue;

			/* We copy as much as we can fit in the buffer,
			 * but we count all items so that the caller
			 * can resize the buffer.
			 */
			if (len >= item_len) {
				rds_info_copy(iter, buffer, item_len);
				len -= item_len;
			}
			lens->nr++;
		}
	}
	rcu_read_unlock();
}

/* Fill a v4 rds_info_connection record for @cp.  Returns 0 to skip
 * IPv6 conns (they cannot be represented in the 32-bit address
 * fields), 1 otherwise.
 */
static int rds_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
	struct rds_info_connection *cinfo = buffer;
	struct rds_connection *conn = cp->cp_conn;

	if (conn->c_isv6)
		return 0;

	cinfo->next_tx_seq = cp->cp_next_tx_seq;
	cinfo->next_rx_seq = cp->cp_next_rx_seq;
	/* v4 addresses are v4-mapped: the low 32 bits of the in6_addr. */
	cinfo->laddr = conn->c_laddr.s6_addr32[3];
	cinfo->faddr = conn->c_faddr.s6_addr32[3];
	/* NOTE(review): strncpy leaves cinfo->transport unterminated when
	 * t_name exactly fills the field — presumably userspace treats it
	 * as a fixed-width field; confirm before changing.
	 */
	strncpy(cinfo->transport, conn->c_trans->t_name,
		sizeof(cinfo->transport));
	cinfo->flags = 0;

	rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
			  CONNECTED);
	return 1;
}

737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764
/* Fill a rds6_info_connection record for @cp; handles both v4-mapped
 * and native IPv6 conns since the address fields are full in6_addrs.
 */
static int rds6_conn_info_visitor(struct rds_conn_path *cp, void *buffer)
{
	struct rds6_info_connection *cinfo6 = buffer;
	struct rds_connection *conn = cp->cp_conn;

	cinfo6->next_tx_seq = cp->cp_next_tx_seq;
	cinfo6->next_rx_seq = cp->cp_next_rx_seq;
	cinfo6->laddr = conn->c_laddr;
	cinfo6->faddr = conn->c_faddr;
	strncpy(cinfo6->transport, conn->c_trans->t_name,
		sizeof(cinfo6->transport));
	cinfo6->flags = 0;

	rds_conn_info_set(cinfo6->flags, test_bit(RDS_IN_XMIT, &cp->cp_flags),
			  SENDING);
	/* XXX Future: return the state rather than these funky bits */
	rds_conn_info_set(cinfo6->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_CONNECTING,
			  CONNECTING);
	rds_conn_info_set(cinfo6->flags,
			  atomic_read(&cp->cp_state) == RDS_CONN_UP,
			  CONNECTED);
	/* Just return 1 as there is no error case. This is a helper function
	 * for rds_walk_conn_path_info() and it wants a return value.
	 */
	return 1;
}

A
Andy Grover 已提交
765 766 767 768
/* RDS_INFO_CONNECTIONS handler: dump one v4 record per conn (path 0). */
static void rds_conn_info(struct socket *sock, unsigned int len,
			  struct rds_info_iterator *iter,
			  struct rds_info_lengths *lens)
{
	/* Stack scratch record, rounded up to u64 alignment. */
	u64 buffer[(sizeof(struct rds_info_connection) + 7) / 8];

	rds_walk_conn_path_info(sock, len, iter, lens,
				rds_conn_info_visitor,
				buffer,
				sizeof(struct rds_info_connection));
}

777 778 779 780 781 782 783 784 785 786 787 788
/* RDS6_INFO_CONNECTIONS handler: dump one v6 record per conn (path 0). */
static void rds6_conn_info(struct socket *sock, unsigned int len,
			   struct rds_info_iterator *iter,
			   struct rds_info_lengths *lens)
{
	/* Stack scratch record, rounded up to u64 alignment. */
	u64 buffer[(sizeof(struct rds6_info_connection) + 7) / 8];

	rds_walk_conn_path_info(sock, len, iter, lens,
				rds6_conn_info_visitor,
				buffer,
				sizeof(struct rds6_info_connection));
}

789
/* Module-init for the connection layer: pernet loop callback, the
 * rds_connection slab, and the v4/v6 info handlers.  Returns 0 or a
 * negative errno; on failure everything already set up is unwound.
 */
int rds_conn_init(void)
{
	int ret;

	ret = rds_loop_net_init(); /* register pernet callback */
	if (ret)
		return ret;

	rds_conn_slab = kmem_cache_create("rds_connection",
					  sizeof(struct rds_connection),
					  0, 0, NULL);
	if (!rds_conn_slab) {
		rds_loop_net_exit();
		return -ENOMEM;
	}

	rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_register_func(RDS_INFO_SEND_MESSAGES,
			       rds_conn_message_info_send);
	rds_info_register_func(RDS_INFO_RETRANS_MESSAGES,
			       rds_conn_message_info_retrans);
	rds_info_register_func(RDS6_INFO_CONNECTIONS, rds6_conn_info);
	rds_info_register_func(RDS6_INFO_SEND_MESSAGES,
			       rds6_conn_message_info_send);
	rds_info_register_func(RDS6_INFO_RETRANS_MESSAGES,
			       rds6_conn_message_info_retrans);

	return 0;
}

/* Module-exit counterpart to rds_conn_init(): tear down loopback,
 * destroy the (expected-empty) conn slab, and deregister all info
 * handlers in both address families.
 */
void rds_conn_exit(void)
{
	rds_loop_net_exit(); /* unregister pernet callback */
	rds_loop_exit();

	/* All conns must already be destroyed by now. */
	WARN_ON(!hlist_empty(rds_conn_hash));

	kmem_cache_destroy(rds_conn_slab);

	rds_info_deregister_func(RDS_INFO_CONNECTIONS, rds_conn_info);
	rds_info_deregister_func(RDS_INFO_SEND_MESSAGES,
				 rds_conn_message_info_send);
	rds_info_deregister_func(RDS_INFO_RETRANS_MESSAGES,
				 rds_conn_message_info_retrans);
	rds_info_deregister_func(RDS6_INFO_CONNECTIONS, rds6_conn_info);
	rds_info_deregister_func(RDS6_INFO_SEND_MESSAGES,
				 rds6_conn_message_info_send);
	rds_info_deregister_func(RDS6_INFO_RETRANS_MESSAGES,
				 rds6_conn_message_info_retrans);
}

/*
 * Force a disconnect
 */
void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
{
	atomic_set(&cp->cp_state, RDS_CONN_ERROR);

	/* Skip queueing shutdown work if the netns is going away —
	 * unless this is the destroy path itself (@destroy), which
	 * must always run rds_shutdown_worker.
	 */
	rcu_read_lock();
	if (!destroy && rds_destroy_pending(cp->cp_conn)) {
		rcu_read_unlock();
		return;
	}
	queue_work(rds_wq, &cp->cp_down_w);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_drop);

A
Andy Grover 已提交
857 858
/* Single-path convenience wrapper: drop path 0.  Must not be used on
 * multipath-capable transports (those must name the path explicitly).
 */
void rds_conn_drop(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_drop(&conn->c_path[0], false);
}
EXPORT_SYMBOL_GPL(rds_conn_drop);
A
Andy Grover 已提交
863

864 865 866 867
/*
 * If the connection is down, trigger a connect. We may have scheduled a
 * delayed reconnect however - in this case we should not interfere.
 */
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
{
	rcu_read_lock();
	/* Don't kick off a connect while the netns is being torn down. */
	if (rds_destroy_pending(cp->cp_conn)) {
		rcu_read_unlock();
		return;
	}
	/* test_and_set_bit keeps us from double-queueing the connect
	 * worker when a delayed reconnect is already pending.
	 */
	if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);
881

882 883
/* Single-path convenience wrapper: connect path 0 if it is down.
 * Must not be used on multipath-capable transports.
 */
void rds_conn_connect_if_down(struct rds_connection *conn)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_conn_path_connect_if_down(&conn->c_path[0]);
}
EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);

889 890 891 892 893 894 895 896 897
/* Log a printf-style error for @cp and force the path down.
 * Backs the rds_conn_path_error() macro/callers seen above.
 */
void
__rds_conn_path_error(struct rds_conn_path *cp, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);

	rds_conn_path_drop(cp, false);
}