/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	 xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/*
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
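
/*
 * Note on units: cwnd and cong are kept in fixed point, with
 * RPC_CWNDSCALE (1 << 8 == 256) representing one request.  Thus
 * RPC_INITCWND starts the window at a single request, RPC_MAXCWND()
 * caps it at the size of the slot table, and a transport counts as
 * congested once the requests holding congestion slots (cong) reach
 * the current window (cwnd).
 */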

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
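
/*
 * Illustrative sketch (not part of this file; all names below are made
 * up): a loadable transport typically registers an xprt_class from its
 * module init and removes it again on exit, roughly like this:
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_EXAMPLE,
 *		.setup	= xs_setup_example,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */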

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
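
/*
 * Note: request_module() above builds the module name as "xprt<name>"
 * (e.g. "xprtrdma" for a transport_name of "rdma"), so a modular
 * transport is expected to be loadable under that name or to provide
 * a matching MODULE_ALIAS.
 */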

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
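	/*
	 * Rank the waiter on the sending queue: tasks with no request
	 * slot get the lowest priority, requests that have never been
	 * transmitted get normal priority, and retransmissions
	 * (rq_ntrans != 0) get the highest priority.
	 */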
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req) {
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
	}
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_bytes_sent = 0;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
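
/*
 * Worked example of the update above: with RPC_CWNDSCALE == 256, a
 * window of two requests is cwnd == 512.  A successful reply while the
 * pipe is full grows it by (256 * 256 + 256) / 512 == 128, i.e. half a
 * request, so roughly 1/cwnd of a request per reply.  A timeout
 * (-ETIMEDOUT) halves the window (512 -> 256), but never shrinks it
 * below one request (RPC_CWNDSCALE).
 */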

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);
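
/*
 * Transports normally call xprt_write_space() from their socket
 * ->write_space callback (or an equivalent notification) once output
 * buffer space becomes available again, waking the task that went to
 * sleep in xprt_wait_for_buffer_space() above.
 */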

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
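	/*
	 * Back off: double the estimated RTO once for each timeout the
	 * RTT estimator has recorded for this procedure and once for
	 * each retransmission of this request; the result is clamped
	 * to to_maxval below.
	 */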
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
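/*
 * Example (illustrative values only): with to_initval = 5s,
 * to_increment = 5s, to_retries = 3 and to_exponential == 0,
 * rq_timeout steps through 5s, 10s, 15s, 20s on successive minor
 * timeouts (ignoring to_maxval).  Once rq_majortimeo has passed, the
 * timeout is reset to to_initval, the RTT counters are re-initialized
 * ("slow start"), and -ETIMEDOUT is returned to signal a major timeout.
 */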

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid)
			return entry;

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
811
 * @task: RPC request that recently completed
812 813
 * @copied: actual number of bytes received from the transport
 *
814
 * Caller holds transport lock.
L
816
void xprt_complete_rqst(struct rpc_task *task, int copied)
L
818
	struct rpc_rqst *req = task->tk_rqstp;
819
	struct rpc_xprt *xprt = req->rq_xprt;
L
821 822
	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
L
824
	xprt->stat.recvs++;
825
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
826 827
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);
828

L
830
	req->rq_private_buf.len = copied;
831 832
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
833
	smp_wmb();
834
	req->rq_reply_bytes_recvd = copied;
835
	rpc_wake_up_queued_task(&xprt->pending, task);
L
837
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
L
839
static void xprt_timer(struct rpc_task *task)
L
841
	struct rpc_rqst *req = task->tk_rqstp;
L

844 845
	if (task->tk_status != -ETIMEDOUT)
		return;
846
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
L
848
	spin_lock_bh(&xprt->transport_lock);
849
	if (!req->rq_reply_bytes_recvd) {
850
		if (xprt->ops->timer)
851
			xprt->ops->timer(xprt, task);
852 853 854
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
L

857 858 859 860 861
static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
L
867
bool xprt_prepare_transmit(struct rpc_task *task)
L
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
871
	bool ret = false;
L
873
	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
L
C
876 877 878 879 880 881 882 883 884 885 886 887
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
L
889 890 891 892 893
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
L
C
896
	return ret;
L

899
void xprt_end_transmit(struct rpc_task *task)
900
{
901
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
902 903
}

904 905 906 907 908 909 910
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
L
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
914
	int status, numreqs;
L
916
	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
L
918
	if (!req->rq_reply_bytes_recvd) {
919 920 921 922
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
C
L
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
C
L
931 932
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
L
	} else if (!req->rq_bytes_sent)
		return;

937
	req->rq_xtime = ktime_get();
938
	status = xprt->ops->send_request(task);
939 940 941 942
	if (status != 0) {
		task->tk_status = status;
		return;
	}
943

944
	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
945
	task->tk_flags |= RPC_TASK_SENT;
946
	spin_lock_bh(&xprt->transport_lock);
947

948
	xprt->ops->set_retrans_timeout(task);
949

950 951 952
	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
953 954 955
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
956 957
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
L
959 960 961
	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
962
	else {
963 964 965 966
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
967 968 969
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
970
	}
971
	spin_unlock_bh(&xprt->transport_lock);
L

974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001
static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			break;
		list_add(&req->rq_list, &xprt->free);
	}
	if (i < num_prealloc)
		goto out_free;
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
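
/*
 * Illustrative sketch (hypothetical transport; names are made up): a
 * transport's setup routine typically embeds struct rpc_xprt in its
 * own private structure and sizes the slot table from its tunables,
 * roughly:
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *			  example_slot_table_entries,
 *			  example_max_slot_table_entries);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 */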

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = net_random();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid     = xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	atomic_set(&xprt->count, 1);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);
	del_timer_sync(&xprt->timer);

	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (atomic_dec_and_test(&xprt->count))
		xprt_destroy(xprt);
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (atomic_inc_not_zero(&xprt->count))
		return xprt;
	return NULL;
}