/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
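
/*
 * Illustrative sketch of the flow described above, expressed with the
 * entry points defined in this file; the real sequencing is driven by the
 * RPC scheduler and client code, so this is an outline rather than actual
 * call sites:
 *
 *	xprt_reserve(task);		   reserve a slot or sleep on the backlog
 *	xprt_prepare_transmit(task);	   serialize access to the transport
 *	xprt_transmit(task);		   queue on xprt->recv, call ->send_request()
 *	  ... data_ready path: xprt_lookup_rqst() + xprt_complete_rqst() ...
 *	xprt_release(task);		   free the slot, rearm the idle timer
 */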

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void     __xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
static void	 xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
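
/*
 * Registration sketch with a hypothetical "foo" transport: a transport
 * module fills in a struct xprt_class and registers it from its module
 * init hook (the foo_* names and the XPRT_TRANSPORT_FOO ident below are
 * placeholders, not definitions from this file):
 *
 *	static struct xprt_class foo_transport = {
 *		.list	= LIST_HEAD_INIT(foo_transport.list),
 *		.name	= "foo",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_FOO,
 *		.setup	= foo_setup_xprt,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return xprt_register_transport(&foo_transport);
 *	}
 *
 * The module exit hook undoes this with xprt_unregister_transport().
 */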

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
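
/*
 * Given the "xprt%s" format above, a transport registered under the name
 * "foo" would normally be built as a module that declares
 *
 *	MODULE_ALIAS("xprtfoo");
 *
 * so that xprt_load_transport("foo") can pull it in on demand.  The name
 * "foo" is only an illustration.
 */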

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	if (req)
		__xprt_put_cong(xprt, req);
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void xprt_task_clear_bytes_sent(struct rpc_task *task)
{
	if (task != NULL) {
		struct rpc_rqst *req = task->tk_rqstp;
		if (req != NULL)
			req->rq_bytes_sent = 0;
	}
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
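
/*
 * Worked example, assuming RPC_CWNDSCALE is 256 and the current cwnd is
 * 1024 (four slots' worth of credit): a successful reply while the window
 * is full adds (256 * 256 + 512) / 1024 = 64, growing cwnd to 1088, so on
 * the order of cwnd/RPC_CWNDSCALE replies are needed to earn one more
 * slot.  An -ETIMEDOUT result instead halves cwnd to 512, and cwnd is
 * never allowed to drop below RPC_CWNDSCALE.
 */
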
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
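
/*
 * Worked example with a hypothetical rpc_timeout of to_initval = 2s,
 * to_increment = 2s, to_retries = 3, to_maxval = 10s and
 * to_exponential = 0: xprt_reset_majortimeo() arms rq_majortimeo
 * 2s + 2s * 3 = 8s into the future, and each minor timeout before that
 * deadline grows rq_timeout linearly 2s -> 4s -> 6s -> 8s.  Once the
 * major timeout has passed, the values are reset to to_initval, the RTT
 * estimator is restarted, and -ETIMEDOUT tells the caller that this was
 * a major timeout.  With to_exponential set, the timeout doubles instead
 * of growing by to_increment.
 */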

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt_task_clear_bytes_sent(task);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
out:
	spin_unlock_bh(&xprt->transport_lock);
}
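
/*
 * Usage sketch for a hypothetical "foo" transport: the ->connect() method
 * re-pins the write lock to a connect cookie (typically the transport's
 * private structure) before handing off to a worker, and the worker drops
 * it again with xprt_unlock_connect() once the socket is ready, so the
 * requesting task cannot release XPRT_LOCKED underneath the worker.  The
 * foo_* names are placeholders, not code from this file.
 *
 *	static void foo_connect(struct rpc_xprt *xprt, struct rpc_task *task)
 *	{
 *		struct foo_xprt *transport =
 *			container_of(xprt, struct foo_xprt, xprt);
 *
 *		WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
 *		queue_delayed_work(rpciod_workqueue,
 *				   &transport->connect_worker, 0);
 *	}
 *
 *	static void foo_setup_socket(struct work_struct *work)
 *	{
 *		struct foo_xprt *transport = container_of(work,
 *				struct foo_xprt, connect_worker.work);
 *
 *		(set up and connect the socket here)
 *		xprt_unlock_connect(&transport->xprt, transport);
 *		xprt_wake_pending_tasks(&transport->xprt, -EAGAIN);
 *	}
 */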

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
	xprt_release_write(xprt, task);
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid) {
			trace_xprt_lookup_rqst(xprt, xid, 0);
			return entry;
		}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
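
/*
 * Receive-path sketch for a hypothetical datagram transport: the
 * data_ready callback matches the reply XID against the pending queue
 * and completes the request while holding transport_lock, as required by
 * the kernel-doc above.  foo_copy_reply() stands in for the transport's
 * own code that copies the reply into rovr->rq_private_buf.
 *
 *	struct rpc_rqst *rovr;
 *
 *	spin_lock(&xprt->transport_lock);
 *	rovr = xprt_lookup_rqst(xprt, *xid);
 *	if (rovr != NULL) {
 *		copied = foo_copy_reply(rovr, skb);
 *		xprt_adjust_cwnd(xprt, rovr->rq_task, copied);
 *		xprt_complete_rqst(rovr->rq_task, copied);
 *	}
 *	spin_unlock(&xprt->transport_lock);
 */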

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		task->tk_status = status;
		return;
	}
	xprt_inject_disconnect(xprt);

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
	}
	spin_unlock_bh(&xprt->transport_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
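
/*
 * Allocation sketch for a hypothetical "foo" transport whose private
 * state embeds the generic rpc_xprt: the setup routine sizes the
 * allocation for the containing structure and chooses how many request
 * slots to preallocate and to allow at most.  The slot counts here are
 * illustrative, not defaults taken from this file.
 *
 *	struct foo_xprt {
 *		struct rpc_xprt		xprt;
 *		(transport-private fields)
 *	};
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct foo_xprt),
 *			  16, RPC_MAX_SLOT_TABLE);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 */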

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid     = xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	xprt_inject_disconnect(xprt);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	atomic_set(&xprt->count, 1);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);
	del_timer_sync(&xprt->timer);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (atomic_dec_and_test(&xprt->count))
		xprt_destroy(xprt);
}