/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	 xprt_destroy(struct rpc_xprt *xprt);

/* Registry of loaded transport implementations, guarded by xprt_list_lock */
static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165
/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
166
	result = request_module("xprt%s", transport_name);
167 168 169 170 171
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

172 173 174
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
175
 * @xprt: pointer to the target transport
176 177 178 179 180
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
181
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
182 183
{
	struct rpc_rqst *req = task->tk_rqstp;
184
	int priority;
185 186 187 188 189 190 191

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
192
	if (req != NULL)
193
		req->rq_ntrans++;
194

195 196 197
	return 1;

out_sleep:
198
	dprintk("RPC: %5u failed to lock transport %p\n",
199 200 201
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
202 203 204 205
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
206
	else
207 208
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
209 210
	return 0;
}
211
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
212

213 214 215
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
216
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
217
		smp_mb__before_atomic();
218
		clear_bit(XPRT_LOCKED, &xprt->state);
219
		smp_mb__after_atomic();
220
	} else
221
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
222 223
}

L
Linus Torvalds 已提交
224
/*
225 226 227 228 229 230
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
L
Linus Torvalds 已提交
231
 */
232
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
L
Linus Torvalds 已提交
233 234
{
	struct rpc_rqst *req = task->tk_rqstp;
235
	int priority;
L
Linus Torvalds 已提交
236

237
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
L
Linus Torvalds 已提交
238 239 240 241
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
242 243 244 245
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
246
	if (__xprt_get_cong(xprt, task)) {
L
Linus Torvalds 已提交
247
		xprt->snd_task = task;
248
		req->rq_ntrans++;
L
Linus Torvalds 已提交
249 250
		return 1;
	}
251
	xprt_clear_locked(xprt);
L
Linus Torvalds 已提交
252
out_sleep:
253
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
L
Linus Torvalds 已提交
254 255
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
256 257 258 259
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
L
Linus Torvalds 已提交
260
	else
261 262
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
L
Linus Torvalds 已提交
263 264
	return 0;
}
265
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
L
Linus Torvalds 已提交
266

267
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
L
Linus Torvalds 已提交
268 269 270
{
	int retval;

C
Chuck Lever 已提交
271
	spin_lock_bh(&xprt->transport_lock);
272
	retval = xprt->ops->reserve_xprt(xprt, task);
C
Chuck Lever 已提交
273
	spin_unlock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
274 275 276
	return retval;
}

277
static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
278
{
279
	struct rpc_xprt *xprt = data;
280 281 282 283
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
284
	if (req)
285
		req->rq_ntrans++;
286 287
	return true;
}
288

289 290 291 292 293 294 295
static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
296
	xprt_clear_locked(xprt);
297 298
}

299
static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
L
Linus Torvalds 已提交
300
{
301
	struct rpc_xprt *xprt = data;
302
	struct rpc_rqst *req;
L
Linus Torvalds 已提交
303

304 305 306
	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
307
		return true;
308
	}
309
	if (__xprt_get_cong(xprt, task)) {
L
Linus Torvalds 已提交
310
		xprt->snd_task = task;
311
		req->rq_ntrans++;
312
		return true;
L
Linus Torvalds 已提交
313
	}
314 315 316 317 318 319 320 321 322 323 324
	return false;
}

/* Like __xprt_lock_write_next(), but respects the congestion window. */
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

329 330 331 332 333 334
/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
L
Linus Torvalds 已提交
335
 */
336
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
L
Linus Torvalds 已提交
337 338
{
	if (xprt->snd_task == task) {
339 340 341 342 343
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
344
		xprt_clear_locked(xprt);
L
Linus Torvalds 已提交
345 346 347
		__xprt_lock_write_next(xprt);
	}
}
348
EXPORT_SYMBOL_GPL(xprt_release_xprt);
L
Linus Torvalds 已提交
349

350 351 352 353 354 355 356 357 358 359 360
/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
361 362 363 364 365
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
366
		xprt_clear_locked(xprt);
367 368 369
		__xprt_lock_write_next_cong(xprt);
	}
}
370
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
371 372

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
L
Linus Torvalds 已提交
373
{
C
Chuck Lever 已提交
374
	spin_lock_bh(&xprt->transport_lock);
375
	xprt->ops->release_xprt(xprt, task);
C
Chuck Lever 已提交
376
	spin_unlock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
377 378 379 380 381 382 383 384 385 386 387 388 389
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

413 414 415 416 417 418 419 420
/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
421 422 423
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
424
}
425
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
426

427 428
/**
 * xprt_adjust_cwnd - adjust transport congestion window
429
 * @xprt: pointer to xprt
430 431 432
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
433 434 435 436 437 438 439 440 441
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
L
Linus Torvalds 已提交
442
 */
443
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
L
Linus Torvalds 已提交
444
{
445 446
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;
L
Linus Torvalds 已提交
447 448 449 450 451 452 453

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
454
		__xprt_lock_write_next_cong(xprt);
L
Linus Torvalds 已提交
455 456 457 458 459
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
460
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
L
Linus Torvalds 已提交
461 462
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
463
	__xprt_put_cong(xprt, req);
L
Linus Torvalds 已提交
464
}
465
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
L
Linus Torvalds 已提交
466

467 468 469 470 471 472 473 474 475 476 477 478 479
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
480
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
481

482 483 484
/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
R
Randy Dunlap 已提交
485
 * @action: function pointer to be executed after wait
486 487 488 489
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
490
 */
491
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
492 493 494 495
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

496
	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
497
	rpc_sleep_on(&xprt->pending, task, action);
498
}
499
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
500 501 502 503 504 505 506 507 508 509 510

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
511 512
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
513
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
514 515 516
	}
	spin_unlock_bh(&xprt->transport_lock);
}
517
EXPORT_SYMBOL_GPL(xprt_write_space);
518

519 520 521 522 523 524 525 526 527 528 529 530
/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
531
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);
532

533
/**
534 535
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
536
 *
537 538 539 540 541
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
542 543
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
544
	struct rpc_rqst *req = task->tk_rqstp;
545
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
546 547 548 549 550 551

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
552
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);
553

L
Linus Torvalds 已提交
554 555
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
556
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
L
Linus Torvalds 已提交
557 558 559 560 561 562 563 564 565 566 567

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

568 569 570 571
/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
L
Linus Torvalds 已提交
572 573 574 575
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
576
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
L
Linus Torvalds 已提交
577 578 579 580 581 582 583 584 585 586 587 588 589 590 591
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
C
Chuck Lever 已提交
592
		spin_lock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
593
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
C
Chuck Lever 已提交
594
		spin_unlock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
595 596 597 598 599 600 601 602 603 604
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

605
static void xprt_autoclose(struct work_struct *work)
L
Linus Torvalds 已提交
606
{
607 608
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
L
Linus Torvalds 已提交
609

610
	xprt->ops->close(xprt);
611
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
L
Linus Torvalds 已提交
612 613 614
	xprt_release_write(xprt, NULL);
}

615
/**
616
 * xprt_disconnect_done - mark a transport as disconnected
617 618
 * @xprt: transport to flag for disconnect
 *
L
Linus Torvalds 已提交
619
 */
620
void xprt_disconnect_done(struct rpc_xprt *xprt)
L
Linus Torvalds 已提交
621
{
622
	dprintk("RPC:       disconnected transport %p\n", xprt);
C
Chuck Lever 已提交
623
	spin_lock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
624
	xprt_clear_connected(xprt);
625
	xprt_wake_pending_tasks(xprt, -EAGAIN);
C
Chuck Lever 已提交
626
	spin_unlock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
627
}
628
EXPORT_SYMBOL_GPL(xprt_disconnect_done);
L
Linus Torvalds 已提交
629

630 631 632 633 634 635 636 637 638 639 640 641 642
/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
643
	xprt_wake_pending_tasks(xprt, -EAGAIN);
644 645 646
	spin_unlock_bh(&xprt->transport_lock);
}

647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669
/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
670
	xprt_wake_pending_tasks(xprt, -EAGAIN);
671 672 673 674
out:
	spin_unlock_bh(&xprt->transport_lock);
}

L
Linus Torvalds 已提交
675 676 677 678 679
static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

C
Chuck Lever 已提交
680
	spin_lock(&xprt->transport_lock);
681
	if (!list_empty(&xprt->recv))
L
Linus Torvalds 已提交
682
		goto out_abort;
683
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
L
Linus Torvalds 已提交
684
		goto out_abort;
C
Chuck Lever 已提交
685
	spin_unlock(&xprt->transport_lock);
686
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
L
Linus Torvalds 已提交
687 688
	return;
out_abort:
C
Chuck Lever 已提交
689
	spin_unlock(&xprt->transport_lock);
L
Linus Torvalds 已提交
690 691
}

692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722
bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task =NULL;
	xprt->ops->release_xprt(xprt, NULL);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

723 724 725
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
L
Linus Torvalds 已提交
726 727 728 729
 *
 */
void xprt_connect(struct rpc_task *task)
{
730
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;
L
Linus Torvalds 已提交
731

732
	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
L
Linus Torvalds 已提交
733 734
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

735
	if (!xprt_bound(xprt)) {
736
		task->tk_status = -EAGAIN;
L
Linus Torvalds 已提交
737 738 739 740
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;
741 742 743 744

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

745
	if (!xprt_connected(xprt)) {
746
		task->tk_rqstp->rq_bytes_sent = 0;
747
		task->tk_timeout = task->tk_rqstp->rq_timeout;
748
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
749 750 751 752 753

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
754
		xprt->stat.connect_start = jiffies;
755
		xprt->ops->connect(xprt, task);
L
Linus Torvalds 已提交
756
	}
757
	xprt_release_write(xprt, task);
L
Linus Torvalds 已提交
758 759
}

/* rpc_sleep_on() callback run when a connect attempt completes. */
static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EPIPE:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		task->tk_status = -EIO;
	}
}

794 795 796 797 798
/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
L
Linus Torvalds 已提交
799
 */
800
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
L
Linus Torvalds 已提交
801
{
802
	struct rpc_rqst *entry;
L
Linus Torvalds 已提交
803

804
	list_for_each_entry(entry, &xprt->recv, rq_list)
805 806
		if (entry->rq_xid == xid) {
			trace_xprt_lookup_rqst(xprt, xid, 0);
807
			return entry;
808
		}
809 810 811

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
812
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
813 814
	xprt->stat.bad_xids++;
	return NULL;
L
Linus Torvalds 已提交
815
}
816
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
L
Linus Torvalds 已提交
817

818
static void xprt_update_rtt(struct rpc_task *task)
819 820 821
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
822
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
823
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
824 825 826

	if (timer) {
		if (req->rq_ntrans == 1)
827
			rpc_update_rtt(rtt, timer, m);
828 829 830 831
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

832 833
/**
 * xprt_complete_rqst - called when reply processing is complete
834
 * @task: RPC request that recently completed
835 836
 * @copied: actual number of bytes received from the transport
 *
837
 * Caller holds transport lock.
L
Linus Torvalds 已提交
838
 */
839
void xprt_complete_rqst(struct rpc_task *task, int copied)
L
Linus Torvalds 已提交
840
{
841
	struct rpc_rqst *req = task->tk_rqstp;
842
	struct rpc_xprt *xprt = req->rq_xprt;
L
Linus Torvalds 已提交
843

844 845
	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
846
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);
L
Linus Torvalds 已提交
847

848
	xprt->stat.recvs++;
849
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
850 851
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);
852

L
Linus Torvalds 已提交
853
	list_del_init(&req->rq_list);
854
	req->rq_private_buf.len = copied;
855 856
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
857
	smp_wmb();
858
	req->rq_reply_bytes_recvd = copied;
859
	rpc_wake_up_queued_task(&xprt->pending, task);
L
Linus Torvalds 已提交
860
}
861
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
L
Linus Torvalds 已提交
862

863
static void xprt_timer(struct rpc_task *task)
L
Linus Torvalds 已提交
864
{
865
	struct rpc_rqst *req = task->tk_rqstp;
L
Linus Torvalds 已提交
866 867
	struct rpc_xprt *xprt = req->rq_xprt;

868 869
	if (task->tk_status != -ETIMEDOUT)
		return;
870
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
L
Linus Torvalds 已提交
871

872
	spin_lock_bh(&xprt->transport_lock);
873
	if (!req->rq_reply_bytes_recvd) {
874
		if (xprt->ops->timer)
875
			xprt->ops->timer(xprt, task);
876 877 878
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
L
Linus Torvalds 已提交
879 880
}

881 882 883 884 885
static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

886 887 888 889
/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
L
Linus Torvalds 已提交
890
 */
891
bool xprt_prepare_transmit(struct rpc_task *task)
L
Linus Torvalds 已提交
892 893 894
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
895
	bool ret = false;
L
Linus Torvalds 已提交
896

897
	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
L
Linus Torvalds 已提交
898

C
Chuck Lever 已提交
899
	spin_lock_bh(&xprt->transport_lock);
900 901 902 903 904 905 906 907 908 909 910 911
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
L
Linus Torvalds 已提交
912
	}
913 914 915 916 917
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
L
Linus Torvalds 已提交
918
out_unlock:
C
Chuck Lever 已提交
919
	spin_unlock_bh(&xprt->transport_lock);
920
	return ret;
L
Linus Torvalds 已提交
921 922
}

923
void xprt_end_transmit(struct rpc_task *task)
924
{
925
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
926 927
}

928 929 930 931 932 933 934
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 * On success the task is left sleeping on the pending queue (if a reply
 * is expected); on failure task->tk_status carries the transport error.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		/* Reply already received and no partial send outstanding:
		 * nothing left to transmit. */
		return;

	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	trace_xprt_transmit(xprt, req->rq_xid, status);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	/* Update transport statistics under transport_lock. */
	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
	}
	spin_unlock_bh(&xprt->transport_lock);
}

999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026
/*
 * Park @task on the transport's backlog queue.  XPRT_CONGESTED is set
 * before sleeping so later allocators observe the congestion; it is
 * cleared again by xprt_wake_up_backlog() once the backlog drains.
 */
static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

/* Wake the next backlogged task; clear XPRT_CONGESTED once none remain. */
static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	struct rpc_task *next;

	next = rpc_wake_up_next(&xprt->backlog);
	if (next == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

/*
 * Returns true if @task was put to sleep on the backlog queue because
 * the transport is congested; false if the caller may proceed to
 * allocate a slot.
 *
 * XPRT_CONGESTED is tested twice: once locklessly as a fast path, then
 * again under reserve_lock so the decision to sleep cannot race with
 * xprt_wake_up_backlog() (which runs under the same lock) clearing it.
 */
static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050
/*
 * Try to allocate one extra request slot beyond the preallocated set.
 * Returns the new slot, ERR_PTR(-EAGAIN) when the max_reqs cap is
 * reached, or ERR_PTR(-ENOMEM) when the allocation itself fails.
 */
static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req;

	/* Respect the hard cap on the total number of slots. */
	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		return ERR_PTR(-EAGAIN);

	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req == NULL) {
		/* Roll back the count reserved above. */
		atomic_dec(&xprt->num_reqs);
		return ERR_PTR(-ENOMEM);
	}
	return req;
}

/*
 * Free a dynamically allocated slot, but never shrink below the
 * min_reqs preallocated set.  Returns true when @req was freed.
 */
static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs))
		return false;
	kfree(req);
	return true;
}

1051
/*
 * Allocate a request slot for @task: reuse a slot from the free list
 * when possible, otherwise grow the slot table dynamically.  On
 * failure the task is either backlogged (-EAGAIN) or told to retry
 * allocation (-ENOMEM).
 */
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	/* Fast path: take a preallocated/recycled slot off the free list. */
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		/* Slot table is full: queue the task on the backlog. */
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

/*
 * Slot allocation gated by the transport write lock.
 *
 * Note: grabbing the xprt_lock_write() ensures that we throttle
 * new slot allocation if the transport is congested (i.e. when
 * reconnecting a stream transport or when out of socket write
 * buffer space).
 */
void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (!xprt_lock_write(xprt, task))
		return;
	xprt_alloc_slot(xprt, task);
	xprt_release_write(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
L
Linus Torvalds 已提交
1099

1100 1101 1102
static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
1103 1104 1105 1106
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
1107
	xprt_wake_up_backlog(xprt);
1108 1109 1110
	spin_unlock(&xprt->reserve_lock);
}

1111 1112 1113 1114 1115 1116 1117 1118 1119 1120
static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

1121 1122 1123
/**
 * xprt_alloc - allocate and initialize a transport structure
 * @net: network namespace the transport belongs to
 * @size: number of bytes to allocate for the (transport-specific)
 *	structure embedding struct rpc_xprt
 * @num_prealloc: number of request slots to preallocate on xprt->free
 * @max_alloc: upper bound on the total number of request slots
 *	(clamped up to at least @num_prealloc)
 *
 * Returns the new transport, or NULL if any allocation fails (partial
 * allocations are unwound via xprt_free()).
 */
struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	/* Preallocate the guaranteed set of request slots. */
	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

1157 1158
/**
 * xprt_free - release a transport allocated with xprt_alloc()
 * @xprt: transport to free
 */
void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);	/* drop the ref taken in xprt_init() */
	xprt_free_all_slots(xprt);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

1165 1166 1167 1168
/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;		/* already holds a slot */

	task->tk_timeout = 0;
	/* Preset -EAGAIN; ->alloc_slot resets it to 0 on success. */
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;		/* already holds a slot */

	task->tk_timeout = 0;
	/* Preset -EAGAIN; ->alloc_slot resets it to 0 on success. */
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	/* No congestion throttling here, unlike xprt_reserve(). */
	xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

1215
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
L
Linus Torvalds 已提交
1216
{
1217
	return (__force __be32)xprt->xid++;
L
Linus Torvalds 已提交
1218 1219 1220 1221
}

/* Seed the XID sequence with a random starting value. */
static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

1225
static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
L
Linus Torvalds 已提交
1226 1227 1228
{
	struct rpc_rqst	*req = task->tk_rqstp;

1229
	INIT_LIST_HEAD(&req->rq_list);
1230
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
L
Linus Torvalds 已提交
1231 1232
	req->rq_task	= task;
	req->rq_xprt    = xprt;
1233
	req->rq_buffer  = NULL;
L
Linus Torvalds 已提交
1234
	req->rq_xid     = xprt_alloc_xid(xprt);
1235
	req->rq_connect_cookie = xprt->connect_cookie - 1;
1236 1237 1238 1239 1240
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
1241
	req->rq_release_snd_buf = NULL;
1242
	xprt_reset_majortimeo(req);
1243
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
L
Linus Torvalds 已提交
1244 1245 1246
			req, ntohl(req->rq_xid));
}

1247 1248 1249 1250
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 * Drops the write lock if held, unlinks the request from the receive
 * list, accounts statistics, frees buffers/credentials and finally
 * returns the slot via xprt_free_slot() (or the backchannel pool).
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		/* No slot: only release the write lock if we hold it. */
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	/* Remove from the receive list if a reply was expected. */
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	/* Re-arm autodisconnect once no requests are outstanding. */
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

1299
/*
 * Common initialization of a zeroed rpc_xprt: locks, lists, wait
 * queues, initial congestion window, random XID seed, and a reference
 * on the owning network namespace (dropped in xprt_free()).
 */
static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	atomic_set(&xprt->count, 1);	/* caller holds the first reference */

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 * Looks up the registered transport class matching @args->ident, asks
 * it to set up a transport, then completes generic initialization
 * (timers, server name, debugfs).  Returns the transport or an
 * ERR_PTR: -EIO if no class matches, -EINVAL/-ENOMEM/setup errors
 * otherwise.
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	int err;
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	/* Find the transport class registered for this identifier. */
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	/* Only arm the autodisconnect callback for idling transports. */
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	err = rpc_xprt_debugfs_register(xprt);
	if (err) {
		xprt_destroy(xprt);
		return ERR_PTR(err);
	}

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

1387 1388
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 * Called when the last reference is dropped (see xprt_put()).  Stops
 * the autodisconnect timer and cleanup work before handing the final
 * teardown to the transport-specific ->destroy() op.
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);
	del_timer_sync(&xprt->timer);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}
L
Linus Torvalds 已提交
1409

1410 1411 1412 1413 1414 1415 1416
/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
1417 1418
	if (atomic_dec_and_test(&xprt->count))
		xprt_destroy(xprt);
1419
}