/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
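/*
 * For orientation only: a rough sketch (not lifted from the RPC client
 * code) of how the higher-level client in net/sunrpc/clnt.c is expected
 * to drive this interface for one request.  The helpers named below are
 * all defined later in this file:
 *
 *	xprt_reserve(task);		// get a slot, or wait on the backlog queue
 *	if (xprt_prepare_transmit(task))
 *		xprt_transmit(task);	// queue for a reply, send, arm the timer
 *	...				// xprt_complete_rqst() wakes the task when
 *					// a matching XID arrives
 *	xprt_release(task);		// return the slot and drop transport state
 */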

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void     __xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
static void	 xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
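/*
 * Illustrative sketch of the expected module-side usage; the transport
 * name, ident value and setup callback below are made-up placeholders,
 * not symbols defined by this file:
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_EXAMPLE,	// unique transport id
 *		.setup	= xs_setup_example,
 *	};
 *
 *	static int __init init_example_xprt(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 * with a matching xprt_unregister_transport() call in the module exit hook.
 */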

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
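/*
 * Example: for a transport registered under the name "rdma", the call
 * above becomes request_module("xprtrdma"), so the implementing module
 * is expected to advertise a matching MODULE_ALIAS (an assumption about
 * the module, not something enforced here).
 */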

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	if (req)
		__xprt_put_cong(xprt, req);
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void xprt_task_clear_bytes_sent(struct rpc_task *task)
{
	if (task != NULL) {
		struct rpc_rqst *req = task->tk_rqstp;
		if (req != NULL)
			req->rq_bytes_sent = 0;
	}
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_task_clear_bytes_sent(task);
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
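/*
 * Worked example, assuming RPC_CWNDSCALE is 256 as defined in
 * include/linux/sunrpc/xprt.h: with cwnd = 512 (two slots) and the
 * window fully used (cong == cwnd), a successful reply grows cwnd by
 * (256 * 256 + 256) / 512 = 128, i.e. half a slot, while an -ETIMEDOUT
 * result halves it to 256, the RPC_CWNDSCALE floor enforced above.
 */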

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
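/*
 * Example of the arithmetic above: if rpc_calc_rto() yields an estimate
 * of HZ (one second) and the request has already been retried twice with
 * one recorded minor timeout, the value is shifted left by three, giving
 * an eight second timeout, subject to the to_maxval cap.
 */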

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}
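/*
 * Example of the arithmetic above for a non-exponential timeout: with
 * rq_timeout of 10 * HZ, to_increment of 10 * HZ and to_retries of 3,
 * the major timeout lands 10 + 10 * 3 = 40 seconds after "now", unless
 * to_maxval caps it sooner.
 */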

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt_task_clear_bytes_sent(task);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
out:
	spin_unlock_bh(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
	xprt_release_write(xprt, task);
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid) {
			trace_xprt_lookup_rqst(xprt, xid, 0);
			return entry;
		}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		task->tk_status = status;
		return;
	}
	xprt_inject_disconnect(xprt);

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
	}
	spin_unlock_bh(&xprt->transport_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
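/*
 * A transport setup routine is expected to allocate its per-connection
 * state this way; the struct and slot counts below are illustrative
 * placeholders rather than values defined in this file:
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *			  example_slot_table_entries,
 *			  example_max_slot_table_entries);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 */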

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	xprt->ops->alloc_slot(xprt, task);
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid     = xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	xprt_inject_disconnect(xprt);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);

	/* Exclude transport connect/disconnect handlers */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);