/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
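/*
 * As a rough sketch (not an exhaustive list), the flow above maps onto
 * this file's entry points like so:
 *
 *	xprt_reserve()		- obtain a request slot, or wait on the backlog
 *	xprt_prepare_transmit()	- serialize write access to the transport
 *	xprt_transmit()		- send the request and arm the retransmit timer
 *	xprt_complete_rqst()	- a reply matched the XID; wake the caller
 *	xprt_timer()		- no reply in time; adjust the timeout or fail
 *				  the request with -ETIMEDOUT
 *	xprt_release()		- give the slot back when the caller is done
 */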

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	 xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/*
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT		(8U)
#define RPC_CWNDSCALE		(1U << RPC_CWNDSHIFT)
#define RPC_INITCWND		RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)	((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
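/*
 * For a concrete sense of scale: RPC_CWNDSCALE is 256, so RPC_INITCWND
 * starts the window at one request's worth, and a transport with, say,
 * 16 request slots has RPC_MAXCWND = 16 << 8 = 4096, i.e. at most 16
 * congestion-controlled requests in flight.  Each request that holds a
 * congestion slot adds RPC_CWNDSCALE to xprt->cong (see __xprt_get_cong),
 * and the transport counts as congested once cong reaches cwnd.
 */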

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
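/*
 * Worked example of the additive increase above: with cwnd at two
 * requests' worth (2 * RPC_CWNDSCALE = 512), a successful reply adds
 * (256 * 256 + 256) / 512 = 128, i.e. half a request's worth of window,
 * which is the intended 1/cwnd growth.  An -ETIMEDOUT result instead
 * halves cwnd, but never below RPC_CWNDSCALE (a single request).
 */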

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
488
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
489

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
494 495 496 497
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
498
 */
499
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
500 501 502 503
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

504
	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
505
	rpc_sleep_on(&xprt->pending, task, action);
506
}
507
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
519 520
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
521
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
522 523 524
	}
	spin_unlock_bh(&xprt->transport_lock);
}
525
EXPORT_SYMBOL_GPL(xprt_write_space);
526

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
539
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);
540

541
/**
542 543
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
544
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
550 551
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
552
	struct rpc_rqst *req = task->tk_rqstp;
553
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
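/*
 * Example, assuming rpc_calc_rto() currently estimates one second for
 * this procedure's timer class: after one recorded backoff step and one
 * retransmission the shift count is 2, so the timeout becomes 4 seconds,
 * and the result is always clamped to the client's to_maxval (as is a
 * value that shifts all the way to zero).
 */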

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}
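/*
 * For instance, with a linear timeout policy of to_initval = 10 * HZ,
 * to_increment = 10 * HZ and to_retries = 3, a fresh request gets a
 * major timeout 10 + 10 * 3 = 40 seconds from now (subject to the
 * to_maxval clamp); an exponential policy would shift rq_timeout left
 * by to_retries instead.
 */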

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
584
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
L
Linus Torvalds 已提交
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
C
Chuck Lever 已提交
600
		spin_lock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
601
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
C
Chuck Lever 已提交
602
		spin_unlock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

613
static void xprt_autoclose(struct work_struct *work)
L
Linus Torvalds 已提交
614
{
615 616
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
L
Linus Torvalds 已提交
617

618
	xprt->ops->close(xprt);
619
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
L
Linus Torvalds 已提交
620 621 622
	xprt_release_write(xprt, NULL);
}

623
/**
624
 * xprt_disconnect_done - mark a transport as disconnected
625 626
 * @xprt: transport to flag for disconnect
 *
L
Linus Torvalds 已提交
627
 */
628
void xprt_disconnect_done(struct rpc_xprt *xprt)
L
Linus Torvalds 已提交
629
{
630
	dprintk("RPC:       disconnected transport %p\n", xprt);
C
Chuck Lever 已提交
631
	spin_lock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
632
	xprt_clear_connected(xprt);
633
	xprt_wake_pending_tasks(xprt, -EAGAIN);
C
Chuck Lever 已提交
634
	spin_unlock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
635
}
636
EXPORT_SYMBOL_GPL(xprt_disconnect_done);
L
Linus Torvalds 已提交
637

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
651
	xprt_wake_pending_tasks(xprt, -EAGAIN);
652 653 654
	spin_unlock_bh(&xprt->transport_lock);
}

655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677
/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
678
	xprt_wake_pending_tasks(xprt, -EAGAIN);
679 680 681 682
out:
	spin_unlock_bh(&xprt->transport_lock);
}

L
static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

C
Chuck Lever 已提交
688
	spin_lock(&xprt->transport_lock);
689
	if (!list_empty(&xprt->recv))
L
Linus Torvalds 已提交
690
		goto out_abort;
691
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
L
Linus Torvalds 已提交
692
		goto out_abort;
C
Chuck Lever 已提交
693
	spin_unlock(&xprt->transport_lock);
694 695
	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
L
Linus Torvalds 已提交
696 697
	return;
out_abort:
C
Chuck Lever 已提交
698
	spin_unlock(&xprt->transport_lock);
L
Linus Torvalds 已提交
699 700
}

701 702 703
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
L
Linus Torvalds 已提交
704 705 706 707
 *
 */
void xprt_connect(struct rpc_task *task)
{
708
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;
L
Linus Torvalds 已提交
709

710
	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
L
Linus Torvalds 已提交
711 712
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

713
	if (!xprt_bound(xprt)) {
714
		task->tk_status = -EAGAIN;
L
Linus Torvalds 已提交
715 716 717 718
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;
719 720 721 722

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

L
Linus Torvalds 已提交
723
	if (xprt_connected(xprt))
724 725
		xprt_release_write(xprt, task);
	else {
726
		task->tk_rqstp->rq_bytes_sent = 0;
727
		task->tk_timeout = task->tk_rqstp->rq_timeout;
728
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
729 730 731 732 733

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
734
		xprt->stat.connect_start = jiffies;
735
		xprt->ops->connect(xprt, task);
L
Linus Torvalds 已提交
736 737 738
	}
}

739
static void xprt_connect_status(struct rpc_task *task)
L
Linus Torvalds 已提交
740
{
741
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;
L
Linus Torvalds 已提交
742

743
	if (task->tk_status == 0) {
744 745
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
746
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
L
Linus Torvalds 已提交
747 748 749 750 751
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
757 758
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
759
		break;
L
Linus Torvalds 已提交
760
	case -ETIMEDOUT:
761 762
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
L
Linus Torvalds 已提交
763 764
		break;
	default:
765 766
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
767
				xprt->servername);
768 769
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
L
Linus Torvalds 已提交
770 771 772
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
L
Linus Torvalds 已提交
778
 */
779
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
L
Linus Torvalds 已提交
780
{
781
	struct rpc_rqst *entry;
L
Linus Torvalds 已提交
782

783
	list_for_each_entry(entry, &xprt->recv, rq_list)
784 785
		if (entry->rq_xid == xid)
			return entry;
786 787 788

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
789 790
	xprt->stat.bad_xids++;
	return NULL;
L
Linus Torvalds 已提交
791
}
792
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
L
Linus Torvalds 已提交
793

794
static void xprt_update_rtt(struct rpc_task *task)
795 796 797
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
798
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
799
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
800 801 802

	if (timer) {
		if (req->rq_ntrans == 1)
803
			rpc_update_rtt(rtt, timer, m);
804 805 806 807
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

808 809
/**
 * xprt_complete_rqst - called when reply processing is complete
810
 * @task: RPC request that recently completed
811 812
 * @copied: actual number of bytes received from the transport
 *
813
 * Caller holds transport lock.
L
Linus Torvalds 已提交
814
 */
815
void xprt_complete_rqst(struct rpc_task *task, int copied)
L
Linus Torvalds 已提交
816
{
817
	struct rpc_rqst *req = task->tk_rqstp;
818
	struct rpc_xprt *xprt = req->rq_xprt;
L
Linus Torvalds 已提交
819

820 821
	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
L
Linus Torvalds 已提交
822

823
	xprt->stat.recvs++;
824
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
825 826
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);
827

L
Linus Torvalds 已提交
828
	list_del_init(&req->rq_list);
829
	req->rq_private_buf.len = copied;
830 831
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
832
	smp_wmb();
833
	req->rq_reply_bytes_recvd = copied;
834
	rpc_wake_up_queued_task(&xprt->pending, task);
L
Linus Torvalds 已提交
835
}
836
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
L
Linus Torvalds 已提交
837

838
static void xprt_timer(struct rpc_task *task)
L
Linus Torvalds 已提交
839
{
840
	struct rpc_rqst *req = task->tk_rqstp;
L
Linus Torvalds 已提交
841 842
	struct rpc_xprt *xprt = req->rq_xprt;

843 844
	if (task->tk_status != -ETIMEDOUT)
		return;
845
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
L
Linus Torvalds 已提交
846

847
	spin_lock_bh(&xprt->transport_lock);
848
	if (!req->rq_reply_bytes_recvd) {
849
		if (xprt->ops->timer)
850
			xprt->ops->timer(xprt, task);
851 852 853
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
854 855
}

856 857 858 859 860
static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

861 862 863 864
/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
L
Linus Torvalds 已提交
865
 */
866
bool xprt_prepare_transmit(struct rpc_task *task)
L
Linus Torvalds 已提交
867 868 869
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
870
	bool ret = false;
L
Linus Torvalds 已提交
871

872
	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
L
Linus Torvalds 已提交
873

C
Chuck Lever 已提交
874
	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
L
Linus Torvalds 已提交
887
	}
888 889 890 891 892
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
L
Linus Torvalds 已提交
893
out_unlock:
C
Chuck Lever 已提交
894
	spin_unlock_bh(&xprt->transport_lock);
895
	return ret;
L
Linus Torvalds 已提交
896 897
}

898
void xprt_end_transmit(struct rpc_task *task)
899
{
900
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
901 902
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
L
Linus Torvalds 已提交
910 911 912
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
913
	int status, numreqs;
L
Linus Torvalds 已提交
914

915
	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
L
Linus Torvalds 已提交
916

917
	if (!req->rq_reply_bytes_recvd) {
918 919 920 921
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
C
Chuck Lever 已提交
922
			spin_lock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
923 924 925 926 927
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
C
Chuck Lever 已提交
928
			spin_unlock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
929
			xprt_reset_majortimeo(req);
930 931
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
L
Linus Torvalds 已提交
932 933 934 935
		}
	} else if (!req->rq_bytes_sent)
		return;

936
	req->rq_xtime = ktime_get();
937
	status = xprt->ops->send_request(task);
938 939 940 941
	if (status != 0) {
		task->tk_status = status;
		return;
	}
942

943
	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
944
	task->tk_flags |= RPC_TASK_SENT;
945
	spin_lock_bh(&xprt->transport_lock);
946

947
	xprt->ops->set_retrans_timeout(task);
948

949 950 951
	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
952 953 954
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
955 956
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
L
Linus Torvalds 已提交
957

958 959 960
	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
961
	else {
962 963 964 965
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
966 967 968
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
969
	}
970
	spin_unlock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
971 972
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

1025
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
L
Linus Torvalds 已提交
1026
{
1027
	struct rpc_rqst *req;
L
Linus Torvalds 已提交
1028

1029
	spin_lock(&xprt->reserve_lock);
L
Linus Torvalds 已提交
1030
	if (!list_empty(&xprt->free)) {
1031 1032 1033 1034
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
1035
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
1036 1037 1038 1039 1040 1041
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
1042
		task->tk_status = -ENOMEM;
1043 1044
		break;
	case -EAGAIN:
1045
		xprt_add_backlog(xprt, task);
1046
		dprintk("RPC:       waiting for request slot\n");
1047 1048
	default:
		task->tk_status = -EAGAIN;
L
Linus Torvalds 已提交
1049
	}
1050
	spin_unlock(&xprt->reserve_lock);
1051 1052 1053 1054 1055
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
L
Linus Torvalds 已提交
1071
}
1072
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
L
Linus Torvalds 已提交
1073

1074 1075 1076
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
1077 1078 1079 1080
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
1081
	xprt_wake_up_backlog(xprt);
1082 1083 1084
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

1095 1096 1097
struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
1098 1099
{
	struct rpc_xprt *xprt;
1100 1101
	struct rpc_rqst *req;
	int i;
1102 1103 1104 1105 1106

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

1107 1108 1109 1110 1111
	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
1112
			goto out_free;
1113 1114
		list_add(&req->rq_list, &xprt->free);
	}
1115 1116 1117 1118 1119 1120
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);
1121 1122 1123 1124

	return xprt;

out_free:
1125
	xprt_free(xprt);
1126 1127 1128 1129 1130
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

1131 1132
void xprt_free(struct rpc_xprt *xprt)
{
1133
	put_net(xprt->xprt_net);
1134
	xprt_free_all_slots(xprt);
1135 1136 1137 1138
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

1139 1140 1141 1142
/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
1143 1144
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
1145 1146 1147
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
L
Linus Torvalds 已提交
1148
{
1149
	struct rpc_xprt	*xprt;
L
Linus Torvalds 已提交
1150

1151 1152 1153 1154 1155 1156
	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
1157 1158
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
1185
	xprt->ops->alloc_slot(xprt, task);
1186
	rcu_read_unlock();
L
Linus Torvalds 已提交
1187 1188
}

1189
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
L
Linus Torvalds 已提交
1190
{
1191
	return (__force __be32)xprt->xid++;
L
Linus Torvalds 已提交
1192 1193 1194 1195
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
1196
	xprt->xid = prandom_u32();
L
Linus Torvalds 已提交
1197 1198
}

1199
static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
L
Linus Torvalds 已提交
1200 1201 1202
{
	struct rpc_rqst	*req = task->tk_rqstp;

1203
	INIT_LIST_HEAD(&req->rq_list);
1204
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
L
Linus Torvalds 已提交
1205 1206
	req->rq_task	= task;
	req->rq_xprt    = xprt;
1207
	req->rq_buffer  = NULL;
L
Linus Torvalds 已提交
1208
	req->rq_xid     = xprt_alloc_xid(xprt);
1209
	req->rq_connect_cookie = xprt->connect_cookie - 1;
1210 1211 1212 1213 1214
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
1215
	req->rq_release_snd_buf = NULL;
1216
	xprt_reset_majortimeo(req);
1217
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
L
Linus Torvalds 已提交
1218 1219 1220
			req, ntohl(req->rq_xid));
}

1221 1222 1223 1224
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
L
Linus Torvalds 已提交
1225
 */
1226
void xprt_release(struct rpc_task *task)
L
Linus Torvalds 已提交
1227
{
1228
	struct rpc_xprt	*xprt;
1229
	struct rpc_rqst	*req = task->tk_rqstp;
L
Linus Torvalds 已提交
1230

1231 1232 1233 1234 1235 1236 1237 1238
	if (req == NULL) {
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
L
Linus Torvalds 已提交
1239
		return;
1240
	}
1241 1242

	xprt = req->rq_xprt;
1243 1244 1245 1246
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
C
Chuck Lever 已提交
1247
	spin_lock_bh(&xprt->transport_lock);
1248
	xprt->ops->release_xprt(xprt, task);
1249 1250
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
L
Linus Torvalds 已提交
1251 1252 1253
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
1254
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
1255
		mod_timer(&xprt->timer,
1256
				xprt->last_used + xprt->idle_timeout);
C
Chuck Lever 已提交
1257
	spin_unlock_bh(&xprt->transport_lock);
1258
	if (req->rq_buffer)
1259
		xprt->ops->buf_free(req->rq_buffer);
1260 1261
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
L
Linus Torvalds 已提交
1262
	task->tk_rqstp = NULL;
1263 1264
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);
1265

1266
	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
1267 1268 1269
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
1270
		xprt_free_bc_request(req);
L
Linus Torvalds 已提交
1271 1272
}

1273
static void xprt_init(struct rpc_xprt *xprt, struct net *net)
1274
{
1275
	atomic_set(&xprt->count, 1);
1276 1277 1278 1279 1280 1281

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
1282
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1283 1284
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
1285
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1286

1287 1288
	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
1289
	xprt->bind_index = 0;
1290 1291 1292

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
1293
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
1294 1295 1296 1297
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

1298
	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
1327
		goto out;
1328
	}
1329 1330
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
1331 1332 1333 1334 1335 1336
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

1348
	dprintk("RPC:       created transport %p with %u slots\n", xprt,
1349
			xprt->max_reqs);
1350
out:
1351 1352 1353
	return xprt;
}

1354 1355
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
1356
 * @xprt: transport to destroy
1357
 *
L
Linus Torvalds 已提交
1358
 */
1359
static void xprt_destroy(struct rpc_xprt *xprt)
L
Linus Torvalds 已提交
1360
{
1361
	dprintk("RPC:       destroying transport %p\n", xprt);
1362
	del_timer_sync(&xprt->timer);
1363

1364 1365 1366 1367
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
1368
	cancel_work_sync(&xprt->task_cleanup);
1369
	kfree(xprt->servername);
1370 1371 1372
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
1373
	xprt->ops->destroy(xprt);
1374
}
1375

1376 1377 1378 1379 1380 1381 1382
/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
1383 1384
	if (atomic_dec_and_test(&xprt->count))
		xprt_destroy(xprt);
1385
}