/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
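
/*
 * A minimal illustrative sketch of the flow described above, as a
 * hypothetical synchronous caller might drive it; this is not part of the
 * file's API surface, and the names my_task/my_xprt are assumptions used
 * only for illustration:
 *
 *	xprt_reserve(my_task);     // grab a request slot, or sleep on the
 *	                           // backlog queue until one frees up
 *	// ... marshal the RPC message into my_task->tk_rqstp ...
 *	xprt_transmit(my_task);    // send it; if a reply is expected the
 *	                           // task sleeps on my_xprt->pending
 *	// a matching reply (or xprt_timer) wakes the task; the caller
 *	// processes the result and xprt_release(my_task) frees the slot
 */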

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_connect_status(struct rpc_task *task);
static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void     __xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
static void	 xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	if (req)
		__xprt_put_cong(xprt, req);
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
L
Linus Torvalds 已提交
264
	else
265 266
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
L
Linus Torvalds 已提交
267 268
	return 0;
}
269
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
L
Linus Torvalds 已提交
270

271
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
L
Linus Torvalds 已提交
272 273 274
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void xprt_task_clear_bytes_sent(struct rpc_task *task)
{
	if (task != NULL) {
		struct rpc_rqst *req = task->tk_rqstp;
		if (req != NULL)
			req->rq_bytes_sent = 0;
	}
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
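
/*
 * Worked example of the adjustment above (a sketch, assuming the usual
 * RPC_CWNDSCALE of 256 from the xprt header): with cwnd == 4096 and a
 * successful reply arriving while at the congestion limit, cwnd grows by
 * (256 * 256 + 2048) / 4096 == 16, i.e. by 1/16 of one RPC_CWNDSCALE
 * "slot" -- the classic additive increase.  A timeout instead halves
 * cwnd, never dropping below RPC_CWNDSCALE.
 */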

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task_on_wq(xprtiod_workqueue,
				&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
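
/*
 * Example of the retransmit schedule implemented above (a sketch with
 * hypothetical timeout values, not defaults taken from this file): for a
 * non-exponential timeout with to_initval = 5s, to_increment = 5s and
 * to_retries = 3, the minor timeouts run 5s, 10s, 15s while the major
 * timeout ceiling is 5 + 5 * 3 = 20s; once that is crossed the request
 * restarts at to_initval and xprt_adjust_timeout() returns -ETIMEDOUT.
 */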

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt_task_clear_bytes_sent(task);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock_bh(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
	xprt_release_write(xprt, task);
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->recv_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid) {
			trace_xprt_lookup_rqst(xprt, xid, 0);
			entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
			return entry;
		}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding the xprt receive lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller should be holding the xprt receive lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->recv_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->recv_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		task->tk_rqstp->rq_reply_bytes_recvd != 0;
}

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	unsigned int connect_cookie;
	int status;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {

		/* Verify that our message lies in the RPCSEC_GSS window */
		if (!req->rq_bytes_sent && rpcauth_xmit_need_reencode(task)) {
			task->tk_status = -EBADMSG;
			return;
		}

		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			spin_lock(&xprt->recv_lock);
			list_add_tail(&req->rq_list, &xprt->recv);
			set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
			spin_unlock(&xprt->recv_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (xprt_request_data_received(task) && !req->rq_bytes_sent)
		return;

	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(task);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	xprt_inject_disconnect(xprt);

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock_bh(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		/*
		 * Sleep on the pending queue if we're expecting a reply.
		 * The spinlock ensures atomicity between the test of
		 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
		 */
		spin_lock(&xprt->recv_lock);
		if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			/* Wake up immediately if the connection was dropped */
			if (!xprt_connected(xprt))
				rpc_wake_up_queued_task_set_status(&xprt->pending,
						task, -ENOTCONN);
		}
		spin_unlock(&xprt->recv_lock);
	}
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock(&xprt->recv_lock);
	if (!list_empty(&req->rq_list)) {
		list_del_init(&req->rq_list);
		if (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate);
			spin_unlock(&xprt->recv_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->recv_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate);
		}
	}
	spin_unlock(&xprt->recv_lock);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt->last_used = jiffies;
	xprt_schedule_autodisconnect(xprt);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->recv_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);

	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);