/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
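
/*
 * Rough life cycle of a request, as a sketch of the functions defined in
 * this file (error handling and congestion control omitted):
 *
 *	xprt_reserve()		allocate a request slot, or wait on the
 *				backlog queue until one frees up
 *	xprt_transmit()		queue the request for a reply (if one is
 *				expected) and hand it to the transport
 *	xprt_complete_rqst()	a reply matched the XID; the caller is woken
 *	xprt_timer()		runs instead when no reply arrived in time
 *	xprt_release()		return the slot and update transport timers
 */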

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	 xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/*
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
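
/*
 * Worked example (numbers only for illustration): with RPC_CWNDSCALE = 256,
 * a window of two requests is cwnd = 512.  A reply received while a full
 * window is outstanding lets xprt_adjust_cwnd() below grow cwnd by roughly
 * RPC_CWNDSCALE * RPC_CWNDSCALE / cwnd = 128, i.e. about half a request
 * (the 1/cwnd additive increase described above), while an -ETIMEDOUT
 * result halves cwnd, never dropping below RPC_CWNDSCALE (one request).
 */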

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid)
			return entry;

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

L
	list_del_init(&req->rq_list);
820
	req->rq_private_buf.len = copied;
821 822
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
823
	smp_wmb();
824
	req->rq_reply_bytes_recvd = copied;
825
	rpc_wake_up_queued_task(&xprt->pending, task);
L
}
827
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
L

829
static void xprt_timer(struct rpc_task *task)
L
{
831
	struct rpc_rqst *req = task->tk_rqstp;
L
	struct rpc_xprt *xprt = req->rq_xprt;

834 835
	if (task->tk_status != -ETIMEDOUT)
		return;
836
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
L

838
	spin_lock_bh(&xprt->transport_lock);
839
	if (!req->rq_reply_bytes_recvd) {
840
		if (xprt->ops->timer)
841
			xprt->ops->timer(xprt, task);
842 843 844
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
L
}

847 848 849 850 851
static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
		err = req->rq_reply_bytes_recvd;
		goto out_unlock;
	}
	if (!xprt->ops->reserve_xprt(xprt, task))
		err = -EAGAIN;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return err;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_connect_cookie = xprt->connect_cookie;
	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		rpc_sleep_on(&xprt->pending, task, xprt_timer);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			break;
		list_add(&req->rq_list, &xprt->free);
	}
	if (i < num_prealloc)
		goto out_free;
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = net_random();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid     = xprt_alloc_xid(xprt);
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	atomic_set(&xprt->count, 1);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);
	del_timer_sync(&xprt->timer);

	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
1355 1356
	if (atomic_dec_and_test(&xprt->count))
		xprt_destroy(xprt);
1357 1358 1359 1360 1361 1362 1363 1364 1365
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
1366 1367 1368
	if (atomic_inc_not_zero(&xprt->count))
		return xprt;
	return NULL;
L
}