/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/export.h>

#include "rds.h"

/*
 * All of connection management is simplified by serializing it through
 * work queues that execute in a connection managing thread.
 *
 * TCP wants to send acks through sendpage() in response to data_ready(),
 * but it needs a process context to do so.
 *
 * The receive paths need to allocate but can't drop packets (!) so we have
 * a thread around to block allocating if the receive fast path sees an
 * allocation failure.
 */

/* Grand Unified Theory of connection life cycle:
 * At any point in time, the connection can be in one of these states:
 * DOWN, CONNECTING, UP, DISCONNECTING, ERROR
 *
 * The following transitions are possible:
 *  ANY		  -> ERROR
 *  UP		  -> DISCONNECTING
 *  ERROR	  -> DISCONNECTING
 *  DISCONNECTING -> DOWN
 *  DOWN	  -> CONNECTING
 *  CONNECTING	  -> UP
 *
 * Transition to state DISCONNECTING/DOWN:
 *  -	Inside the shutdown worker; synchronizes with xmit path
 *	through RDS_IN_XMIT, and with connection management callbacks
 *	via c_cm_lock.
 *
 *	For receive callbacks, we rely on the underlying transport
 *	(TCP, IB/RDMA) to provide the necessary synchronisation.
 */
/* Single-threaded workqueue on which all RDS connection-management work
 * (connect/send/recv/shutdown workers below) is queued, serializing it
 * through one thread as described in the comment above.
 */
struct workqueue_struct *rds_wq;
EXPORT_SYMBOL_GPL(rds_wq);
void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
A
Andy Grover 已提交
75
{
76
	if (!rds_conn_path_transition(cp, curr, RDS_CONN_UP)) {
A
Andy Grover 已提交
77 78 79
		printk(KERN_WARNING "%s: Cannot transition to state UP, "
				"current state is %d\n",
				__func__,
80 81
				atomic_read(&cp->cp_state));
		rds_conn_path_drop(cp);
A
Andy Grover 已提交
82 83 84 85
		return;
	}

	rdsdebug("conn %p for %pI4 to %pI4 complete\n",
86
	  cp->cp_conn, &cp->cp_conn->c_laddr, &cp->cp_conn->c_faddr);
A
Andy Grover 已提交
87

88 89 90 91
	cp->cp_reconnect_jiffies = 0;
	set_bit(0, &cp->cp_conn->c_map_queued);
	queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
	queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
A
Andy Grover 已提交
92
}
93 94 95 96
EXPORT_SYMBOL_GPL(rds_connect_path_complete);

/* Complete the connect on path 0 of @conn, which must currently be in
 * RDS_CONN_CONNECTING. */
void rds_connect_complete(struct rds_connection *conn)
{
	rds_connect_path_complete(&conn->c_path[0], RDS_CONN_CONNECTING);
}
EXPORT_SYMBOL_GPL(rds_connect_complete);

/*
 * This random exponential backoff is relied on to eventually resolve racing
 * connects.
 *
 * If connect attempts race then both parties drop both connections and come
 * here to wait for a random amount of time before trying again.  Eventually
 * the backoff range will be so much greater than the time it takes to
 * establish a connection that one of the pair will establish the connection
 * before the other's random delay fires.
 *
 * Connection attempts that arrive while a connection is already established
 * are also considered to be racing connects.  This lets a connection from
 * a rebooted machine replace an existing stale connection before the transport
 * notices that the connection has failed.
 *
 * We should *always* start with a random backoff; otherwise a broken connection
 * will always take several iterations to be re-established.
 */
void rds_queue_reconnect(struct rds_conn_path *cp)
A
Andy Grover 已提交
120 121
{
	unsigned long rand;
122
	struct rds_connection *conn = cp->cp_conn;
A
Andy Grover 已提交
123 124 125

	rdsdebug("conn %p for %pI4 to %pI4 reconnect jiffies %lu\n",
	  conn, &conn->c_laddr, &conn->c_faddr,
126
	  cp->cp_reconnect_jiffies);
A
Andy Grover 已提交
127

128 129 130 131
	set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
	if (cp->cp_reconnect_jiffies == 0) {
		cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
		queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
A
Andy Grover 已提交
132 133 134 135 136
		return;
	}

	get_random_bytes(&rand, sizeof(rand));
	rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n",
137
		 rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies,
A
Andy Grover 已提交
138
		 conn, &conn->c_laddr, &conn->c_faddr);
139 140
	queue_delayed_work(rds_wq, &cp->cp_conn_w,
			   rand % cp->cp_reconnect_jiffies);
A
Andy Grover 已提交
141

142
	cp->cp_reconnect_jiffies = min(cp->cp_reconnect_jiffies * 2,
A
Andy Grover 已提交
143 144 145 146 147
					rds_sysctl_reconnect_max_jiffies);
}

void rds_connect_worker(struct work_struct *work)
{
148 149 150 151
	struct rds_conn_path *cp = container_of(work,
						struct rds_conn_path,
						cp_conn_w.work);
	struct rds_connection *conn = cp->cp_conn;
A
Andy Grover 已提交
152 153
	int ret;

154 155
	clear_bit(RDS_RECONNECT_PENDING, &cp->cp_flags);
	if (rds_conn_path_transition(cp, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
A
Andy Grover 已提交
156 157 158 159 160
		ret = conn->c_trans->conn_connect(conn);
		rdsdebug("conn %p for %pI4 to %pI4 dispatched, ret %d\n",
			conn, &conn->c_laddr, &conn->c_faddr, ret);

		if (ret) {
161 162 163 164
			if (rds_conn_path_transition(cp,
						     RDS_CONN_CONNECTING,
						     RDS_CONN_DOWN))
				rds_queue_reconnect(cp);
A
Andy Grover 已提交
165
			else
166 167
				rds_conn_path_error(cp,
						    "RDS: connect failed\n");
A
Andy Grover 已提交
168 169 170 171 172 173
		}
	}
}

void rds_send_worker(struct work_struct *work)
{
174 175 176
	struct rds_conn_path *cp = container_of(work,
						struct rds_conn_path,
						cp_send_w.work);
A
Andy Grover 已提交
177 178
	int ret;

179 180
	if (rds_conn_path_state(cp) == RDS_CONN_UP) {
		clear_bit(RDS_LL_SEND_FULL, &cp->cp_flags);
181
		ret = rds_send_xmit(cp);
182
		cond_resched();
183
		rdsdebug("conn %p ret %d\n", cp->cp_conn, ret);
A
Andy Grover 已提交
184 185 186
		switch (ret) {
		case -EAGAIN:
			rds_stats_inc(s_send_immediate_retry);
187
			queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
A
Andy Grover 已提交
188 189 190
			break;
		case -ENOMEM:
			rds_stats_inc(s_send_delayed_retry);
191
			queue_delayed_work(rds_wq, &cp->cp_send_w, 2);
A
Andy Grover 已提交
192 193 194 195 196 197 198 199
		default:
			break;
		}
	}
}

void rds_recv_worker(struct work_struct *work)
{
200 201 202
	struct rds_conn_path *cp = container_of(work,
						struct rds_conn_path,
						cp_recv_w.work);
A
Andy Grover 已提交
203 204
	int ret;

205 206 207
	if (rds_conn_path_state(cp) == RDS_CONN_UP) {
		ret = cp->cp_conn->c_trans->recv(cp->cp_conn);
		rdsdebug("conn %p ret %d\n", cp->cp_conn, ret);
A
Andy Grover 已提交
208 209 210
		switch (ret) {
		case -EAGAIN:
			rds_stats_inc(s_recv_immediate_retry);
211
			queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
A
Andy Grover 已提交
212 213 214
			break;
		case -ENOMEM:
			rds_stats_inc(s_recv_delayed_retry);
215
			queue_delayed_work(rds_wq, &cp->cp_recv_w, 2);
A
Andy Grover 已提交
216 217 218 219 220 221
		default:
			break;
		}
	}
}

/* Work handler for cp_down_w: tear the connection path down.
 * Note cp_down_w is a plain work_struct, not a delayed_work. */
void rds_shutdown_worker(struct work_struct *work)
{
	struct rds_conn_path *cp = container_of(work,
						struct rds_conn_path,
						cp_down_w);

	rds_conn_shutdown(cp);
}

/* Tear down the RDS workqueue; flushes any pending work first. */
void rds_threads_exit(void)
{
	destroy_workqueue(rds_wq);
}

int rds_threads_init(void)
A
Andy Grover 已提交
237
{
238
	rds_wq = create_singlethread_workqueue("krdsd");
239
	if (!rds_wq)
A
Andy Grover 已提交
240 241 242 243
		return -ENOMEM;

	return 0;
}