xprt.c 36.5 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
13 14
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
15 16 17
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
L
Linus Torvalds 已提交
18
 *  -	When a packet arrives, the data_ready handler walks the list of
19
 *	pending requests for that transport. If a matching XID is found, the
L
Linus Torvalds 已提交
20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
36 37
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
L
Linus Torvalds 已提交
38 39
 */

40 41
#include <linux/module.h>

L
Linus Torvalds 已提交
42
#include <linux/types.h>
43
#include <linux/interrupt.h>
L
Linus Torvalds 已提交
44
#include <linux/workqueue.h>
45
#include <linux/net.h>
46
#include <linux/ktime.h>
L
Linus Torvalds 已提交
47

48
#include <linux/sunrpc/clnt.h>
49
#include <linux/sunrpc/metrics.h>
50
#include <linux/sunrpc/bc_xprt.h>
L
Linus Torvalds 已提交
51

52 53
#include "sunrpc.h"

L
Linus Torvalds 已提交
54 55 56 57 58 59 60 61 62 63 64
/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
65
static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
L
Linus Torvalds 已提交
66 67 68
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
69
static void	 xprt_destroy(struct rpc_xprt *xprt);
L
Linus Torvalds 已提交
70

J
Jiri Slaby 已提交
71
static DEFINE_SPINLOCK(xprt_list_lock);
72 73
static LIST_HEAD(xprt_list);

74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90
/*
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
L
Linus Torvalds 已提交
91

92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112
/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 *
 * NOTE(review): -EINVAL is documented above but this version never
 * returns it — only 0 or -EEXIST are produced below. Confirm against
 * the header's kernel-doc before relying on -EINVAL.
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	/* Assume failure until the identity check passes. */
	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
130
 * @transport: transport to unregister
131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181
/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	/* Fast path: the transport class may already be registered. */
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	/* Not registered yet: ask modprobe for "xprt<name>". */
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

188 189 190
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 *
 * Returns 1 if the caller now owns the transport lock, 0 if the task
 * was put to sleep on the sending queue (tk_status set to -EAGAIN).
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		/* Re-entrant: the lock holder may call this again. */
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	/* Tasks mid-retransmit (rq_ntrans != 0) get priority so a
	 * partially-sent request can complete first. */
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
228

229 230 231
/*
 * Release the XPRT_LOCKED bit.  If a close was requested while we held
 * the lock, hand the transport to rpciod for the autoclose work instead
 * of simply clearing the bit, so the close is not lost.
 */
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		/* Barriers pair with the test_and_set_bit() in the
		 * lock acquirers. */
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

L
Linus Torvalds 已提交
240
/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	/* Tasks without a request slot (e.g. connect) bypass the
	 * congestion window check. */
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	/* Congestion window full: drop the lock again and sleep. */
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
L
Linus Torvalds 已提交
282

283
/*
 * Take the transport's write lock via its reserve_xprt op, under the
 * transport_lock.  Returns the op's result (1 on success, 0 if the
 * task was queued).
 */
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

293
/*
 * rpc_wake_up_first() callback: unconditionally grant the transport
 * write lock to @task.  @data is the rpc_xprt.  Always returns true,
 * so the first queued task wins.
 */
static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}
304

305 306 307 308 309 310 311
/*
 * Hand the write lock to the next task sleeping on the sending queue,
 * or clear it if nobody is waiting.  No congestion control.
 */
static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	/* Nobody waiting: release the bit we just set. */
	xprt_clear_locked(xprt);
}

315
/*
 * rpc_wake_up_first() callback with congestion control: grant the
 * write lock to @task only if it has no request slot or it can obtain
 * a congestion slot.  Returning false leaves the task queued.
 */
static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

/*
 * Congestion-controlled variant of __xprt_lock_write_next(): skip the
 * wakeup entirely while the congestion window is full.
 */
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

345 346 347 348 349 350
/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			/* Reset partial-send progress for a future
			 * (re)transmission. */
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);
L
Linus Torvalds 已提交
365

366 367 368 369 370 371 372 373 374 375 376
/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			/* Reset partial-send progress for a future
			 * (re)transmission. */
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
387 388

/*
 * Release the transport write lock via its release_xprt op, under the
 * transport_lock.  Counterpart of xprt_lock_write().
 */
static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 *
 * Returns 1 if the request holds (or now acquired) a congestion slot,
 * 0 if the window is full.  Caller holds the transport lock
 * (presumably — callers here take it; confirm for new call sites).
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	/* Already counted against the window: nothing to do. */
	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	/* A slot just opened up: try to admit a waiter. */
	__xprt_lock_write_next_cong(xprt);
}

429 430 431 432 433 434 435 436
/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
442

443 444
/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 * Additive increase on success while the window is saturated; halve the
 * window on timeout (classic AIMD, scaled by RPC_CWNDSCALE).
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
L
Linus Torvalds 已提交
474

475 476 477 478 479 480 481 482 483 484 485 486 487
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	struct rpc_wait_queue *pending = &xprt->pending;

	/* Non-negative status: wake without touching tk_status. */
	if (status >= 0) {
		rpc_wake_up(pending);
		return;
	}
	rpc_wake_up_status(pending, status);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
489

490 491 492
/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	/* 0 == wait indefinitely; soft tasks keep their timeout. */
	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
508 509 510 511 512 513 514 515 516 517 518

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	/* Only the current lock holder (snd_task) can be blocked on
	 * buffer space, so that is the only task to wake. */
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);
526

527 528 529 530 531 532 533 534 535 536 537 538
/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
539
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);
540

541
/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * backed off exponentially by the number of retries so far, and
 * clamped to the client's configured maximum.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	/* The shift can overflow to 0; treat that as "too large". */
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
561

L
Linus Torvalds 已提交
562 563
/*
 * Recompute the absolute (jiffies) deadline after which a major
 * timeout is declared for @req, from the client's timeout parameters.
 */
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	/* Clamp overflow (== 0 after shift) or excess to to_maxval. */
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

576 577 578 579
/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 * Returns 0 while minor timeouts remain, or -ETIMEDOUT once the major
 * timeout has expired (in which case the per-try timeout and retry
 * count are reset and the RTT estimator is re-initialized).
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		/* Minor timeout: back off the per-try timeout. */
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	/* Defensive: never hand back a zero timeout. */
	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

613
/*
 * rpciod work item: close the transport after XPRT_CLOSE_WAIT was set
 * while the write lock was held (see xprt_clear_locked()), then drop
 * the write lock on behalf of whoever queued us.
 */
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}

623
/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 * Called by the transport implementation once the disconnect has
 * completed; wakes all pending tasks with -EAGAIN so they retry.
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);
L
Linus Torvalds 已提交
637

638 639 640 641 642 643 644 645 646 647 648 649 650
/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}

655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677
/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

L
Linus Torvalds 已提交
683 684 685 686 687
/*
 * Idle timer callback: autoclose the transport if it is idle (no
 * requests on the receive list) and the write lock is free.  The
 * write lock we take here is released by xprt_autoclose().
 */
static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	/* Timer context: plain spin_lock (BH already excluded). */
	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

701 702 703
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 * If the transport is already connected the write lock is released
 * immediately; otherwise the task sleeps on the pending queue and the
 * transport's connect op is kicked off (unless a connect is already
 * in progress).
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	/* A pending close must be completed before reconnecting. */
	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		/* Only one task drives the actual connect. */
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
}

739
/*
 * Completion callback for xprt_connect(): runs when the task is woken
 * from the pending queue.  On success, update connect statistics; on
 * -EAGAIN/-ETIMEDOUT let the caller retry; any other error drops the
 * write lock and fails the task with -EIO.
 */
static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}

768 769 770 771 772
/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Returns the matching request, or NULL (bumping the bad_xids
 * counter) if no pending request carries @xid.  Caller presumably
 * holds the transport lock while walking xprt->recv — confirm at
 * call sites.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid)
			return entry;

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
L
Linus Torvalds 已提交
788

789
/*
 * Feed the just-measured round-trip time (req->rq_rtt) into the
 * client's RTT estimator.  Retransmitted requests (rq_ntrans > 1)
 * only update the backoff counter, not the smoothed RTT, since their
 * samples are ambiguous (Karn's rule).
 */
static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

803 804
/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	/* Off the receive list: no further replies match this XID. */
	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
L
Linus Torvalds 已提交
832

833
/*
 * RPC-layer timeout callback: invoked with tk_status == -ETIMEDOUT
 * when a request's retransmit timer fires.  If a reply actually
 * arrived in the meantime, clear the error; otherwise let the
 * transport's timer op (e.g. RTT adjustment) run.
 */
static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

851 852 853 854 855
/* Non-zero idle_timeout means the autodisconnect timer is in use. */
static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

856 857 858 859
/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 * Returns true if the caller may proceed to transmit, false if the
 * task must wait (tk_status carries the reason: a positive value if
 * the reply already arrived, or -EAGAIN if the transport is busy).
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		/* Reply already received: nothing to (re)transmit. */
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		/* With RPC_TASK_NO_RETRANS_TIMEOUT on a live connection,
		 * don't retransmit — wait on the pending queue instead. */
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

893
/* Drop the transport write lock after a transmit attempt. */
void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		/* Reply already received and nothing left to send: done. */
		return;

	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	/* Update transport statistics under the transport lock. */
	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
	}
	spin_unlock_bh(&xprt->transport_lock);
}

/* Mark the transport congested and park @task on the backlog queue. */
static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

/* Wake one backlogged task; clear congestion if the backlog drained. */
static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	struct rpc_task *next = rpc_wake_up_next(&xprt->backlog);

	if (next == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

/*
 * Defer @task to the backlog if the transport is congested.
 *
 * Returns true if the task was queued (the caller must not allocate a
 * slot), false otherwise. XPRT_CONGESTED is re-tested under reserve_lock
 * so we do not race with xprt_wake_up_backlog() clearing the flag.
 */
static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019
static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

/*
 * Shrink the slot table by freeing @req, but never below the
 * preallocated minimum. Returns true if the slot was freed.
 */
static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs))
		return false;
	kfree(req);
	return true;
}

/*
 * Bind a request slot to @task, reusing one from the free list when
 * possible, otherwise growing the slot table. On failure the task's
 * status is set to -ENOMEM (retry allocation) or -EAGAIN (backlogged).
 * Called with no locks held; takes xprt->reserve_lock internally.
 */
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		/* Fast path: recycle a slot from the free list. */
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		/* Slot table is full: queue behind other waiters. */
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

/* Slot allocation variant that first takes the transport write lock. */
void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
L
Linus Torvalds 已提交
1068

/*
 * Return @req to the free list (or free it outright if the slot table
 * can shrink) and wake the next backlogged task, if any.
 */
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

1080 1081 1082 1083 1084 1085 1086 1087 1088 1089
static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

/**
 * xprt_alloc - allocate and initialize a transport structure
 * @net: network namespace the transport will belong to
 * @size: total size to allocate (at least sizeof(struct rpc_xprt))
 * @num_prealloc: number of request slots to preallocate
 * @max_alloc: upper bound on the dynamically grown slot table
 *
 * Returns the new transport, or NULL on allocation failure.
 */
struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			/* xprt_free() releases the slots added so far. */
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	/* max_reqs may never fall below the preallocated minimum. */
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

/**
 * xprt_free - release a transport: netns reference, slots, and struct
 * @xprt: transport to free
 */
void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		/* A slot is already bound to this task. */
		return;

	/* Assume failure until alloc_slot actually assigns a slot. */
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	/* cl_xprt is RCU-managed; pin it for the duration of the call. */
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		/* A slot is already bound to this task. */
		return;

	/* Assume failure until alloc_slot actually assigns a slot. */
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	/* Bypass the congestion throttle: this task is retrying. */
	xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

1184
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
L
Linus Torvalds 已提交
1185
{
1186
	return (__force __be32)xprt->xid++;
L
Linus Torvalds 已提交
1187 1188 1189 1190
}

/* Seed the XID sequence with a random starting value. */
static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = net_random();
}

/*
 * Initialize a freshly allocated (or recycled) request slot for @task.
 * Called with xprt->reserve_lock held by xprt_alloc_slot().
 */
static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid     = xprt_alloc_xid(xprt);
	/* Guarantee a cookie mismatch until the next (re)connect. */
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		/* No slot bound: just drop the write lock if we hold it. */
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
		return;
	}

	xprt = req->rq_xprt;
	/* Account per-task RPC statistics before tearing down the slot. */
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	/* Unhook the request from the receive list, if it was queued. */
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	/* Re-arm autodisconnect once no replies are outstanding. */
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	/* Backchannel-preallocated slots go back to their own pool. */
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

/* Common initialization shared by all transport implementations. */
static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	atomic_set(&xprt->count, 1);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	/* Hold a namespace reference for the transport's lifetime. */
	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 * Returns the new transport, or an ERR_PTR on failure.
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	/* Find the registered transport class matching args->ident. */
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	/* Only arm autodisconnect when an idle timeout is configured. */
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);
	/* Stop the autodisconnect timer before tearing anything down. */
	del_timer_sync(&xprt->timer);

	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}
L
Linus Torvalds 已提交
1370

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	/* Destroy the transport when the last reference is dropped. */
	if (atomic_dec_and_test(&xprt->count))
		xprt_destroy(xprt);
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
1389 1390 1391
	if (atomic_inc_not_zero(&xprt->count))
		return xprt;
	return NULL;
L
Linus Torvalds 已提交
1392
}