/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

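/*
 * In terms of the entry points below, a (much simplified) synchronous
 * call through this interface looks roughly like:
 *
 *	xprt_reserve(task);		// grab a request slot or back off
 *	...encode the RPC message into rq_snd_buf...
 *	if (xprt_prepare_transmit(task))
 *		xprt_transmit(task);	// send and wait on xprt->pending
 *	...reply arrives, xprt_complete_rqst() wakes the task...
 *	xprt_release(task);		// give the slot back
 *
 * Error handling, retransmission and the async case are omitted; the
 * real call path lives in the RPC client state machine (clnt.c and
 * sched.c), not in direct callers of this file.
 */
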
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static void	xprt_connect_status(struct rpc_task *task);
static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void	 xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

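/*
 * A transport module typically registers its xprt_class from its
 * module_init hook.  A minimal sketch (the "foo" names and ident are
 * only illustrative):
 *
 *	static struct xprt_class xprt_foo_class = {
 *		.list	= LIST_HEAD_INIT(xprt_foo_class.list),
 *		.name	= "foo",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_FOO,
 *		.setup	= xprt_setup_foo,
 *	};
 *
 *	static int __init xprt_foo_init(void)
 *	{
 *		return xprt_register_transport(&xprt_foo_class);
 *	}
 *
 * and unregisters it again with xprt_unregister_transport() on exit.
 */
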
/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

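/*
 * Note the "xprt" prefix handed to request_module() above: a transport
 * module that wants to be auto-loaded by name therefore needs to be
 * reachable under "xprt<name>", for instance via a MODULE_ALIAS such as
 * MODULE_ALIAS("xprtfoo") for a hypothetical "foo" transport.
 */
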
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	xprt->snd_task = task;
	if (req != NULL)
		req->rq_ntrans++;

	return 1;

out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
	} else
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	int priority;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return 1;
	}
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req == NULL)
		priority = RPC_PRIORITY_LOW;
	else if (!req->rq_ntrans)
		priority = RPC_PRIORITY_NORMAL;
	else
		priority = RPC_PRIORITY_HIGH;
	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

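/*
 * The two reservation flavours above map onto two transport policies:
 * stream transports such as TCP plug the plain xprt_reserve_xprt()
 * into their rpc_xprt_ops, while datagram transports such as UDP use
 * the *_cong variants so that the Van Jacobson congestion window also
 * gates access to the socket (see the ops tables in xprtsock.c).
 */
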
static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	xprt->snd_task = task;
	if (req)
		req->rq_ntrans++;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;

	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
		return;
	xprt_clear_locked(xprt);
}

static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;
	struct rpc_rqst *req;

	req = task->tk_rqstp;
	if (req == NULL) {
		xprt->snd_task = task;
		return true;
	}
	if (__xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		req->rq_ntrans++;
		return true;
	}
	return false;
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		if (task != NULL) {
			struct rpc_rqst *req = task->tk_rqstp;
			if (req != NULL)
				req->rq_bytes_sent = 0;
		}
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);

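/*
 * A rough worked example of the update above, assuming RPC_CWNDSCALE
 * is 256 (one slot's worth of window, as defined in xprt.h): with
 * cwnd = 512, i.e. two slots, a reply received under full load grows
 * the window by (256 * 256 + 256) / 512 = 128, half a slot, while an
 * -ETIMEDOUT result halves it to 256.  RPCXPRT_CONGESTED() then
 * compares xprt->cong, which __xprt_get_cong() raises by 256 per
 * in-flight request, against this window.
 */
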
465 466 467 468 469 470 471 472 473 474 475 476 477
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
478
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
479

480 481 482
/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 * @action: function pointer to be executed after wait
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
	rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_write_space);

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
	task->tk_timeout = task->tk_rqstp->rq_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;

	task->tk_timeout = rpc_calc_rto(rtt, timer);
	task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
		task->tk_timeout = max_timeout;
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_rtt);

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

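/*
 * For a feel of the numbers, take a hypothetical linear rpc_timeout of
 * to_initval = 5 * HZ, to_increment = 5 * HZ, to_retries = 3 and
 * to_exponential = 0: the per-transmission timeout then grows 5s, 10s,
 * 15s, 20s across minor timeouts, and xprt_reset_majortimeo() arms
 * rq_majortimeo 5 + 5 * 3 = 20 seconds past the last reset.  Once that
 * major timeout expires, xprt_adjust_timeout() drops rq_timeout back to
 * to_initval and returns -ETIMEDOUT so the caller can decide whether
 * to give up or retry from scratch.
 */
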
static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	xprt->ops->close(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC:       disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
	spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(rpciod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv))
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
	queue_work(rpciod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		task->tk_rqstp->rq_bytes_sent = 0;
		task->tk_timeout = task->tk_rqstp->rq_timeout;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		xprt->stat.connect_start = jiffies;
		xprt->ops->connect(xprt, task);
	}
}

static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	if (task->tk_status == 0) {
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
		dprintk("RPC: %5u xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ECONNABORTED:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EAGAIN:
		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
				"out\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
				"server %s\n", task->tk_pid, -task->tk_status,
				xprt->servername);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	list_for_each_entry(entry, &xprt->recv, rq_list)
		if (entry->rq_xid == xid)
			return entry;

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

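/*
 * Reply-side sketch: a transport's data_ready handler typically takes
 * xprt->transport_lock, matches the incoming XID via xprt_lookup_rqst(),
 * copies the reply into rq_private_buf, and calls xprt_complete_rqst()
 * below to wake the waiting task before dropping the lock.  The exact
 * sequence is transport specific; see the socket transports in
 * xprtsock.c for concrete examples.
 */
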
static void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);

	xprt->stat.recvs++;
	req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime);
	if (xprt->ops->timer != NULL)
		xprt_update_rtt(task);

	list_del_init(&req->rq_list);
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;
	dprintk("RPC: %5u xprt_timer\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
	spin_unlock_bh(&xprt->transport_lock);
}

static inline int xprt_has_timer(struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	bool ret = false;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	spin_lock_bh(&xprt->transport_lock);
	if (!req->rq_bytes_sent) {
		if (req->rq_reply_bytes_recvd) {
			task->tk_status = req->rq_reply_bytes_recvd;
			goto out_unlock;
		}
		if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
		    && xprt_connected(xprt)
		    && req->rq_connect_cookie == xprt->connect_cookie) {
			xprt->ops->set_retrans_timeout(task);
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
			goto out_unlock;
		}
	}
	if (!xprt->ops->reserve_xprt(xprt, task)) {
		task->tk_status = -EAGAIN;
		goto out_unlock;
	}
	ret = true;
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status, numreqs;

	dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_reply_bytes_recvd) {
		if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
			/*
			 * Add to the list only if we're expecting a reply
			 */
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	req->rq_xtime = ktime_get();
	status = xprt->ops->send_request(task);
	if (status != 0) {
		task->tk_status = status;
		return;
	}

	dprintk("RPC: %5u xmit complete\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->ops->set_retrans_timeout(task);

	numreqs = atomic_read(&xprt->num_reqs);
	if (numreqs > xprt->stat.max_slots)
		xprt->stat.max_slots = numreqs;
	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;

	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else {
		/*
		 * Sleep on the pending queue since
		 * we're expecting a reply.
		 */
		if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
			rpc_sleep_on(&xprt->pending, task, xprt_timer);
		req->rq_connect_cookie = xprt->connect_cookie;
	}
	spin_unlock_bh(&xprt->transport_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (!atomic_add_unless(&xprt->num_reqs, 1, xprt->max_reqs))
		goto out;
	req = kzalloc(sizeof(struct rpc_rqst), gfp_flags);
	if (req != NULL)
		goto out;
	atomic_dec(&xprt->num_reqs);
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (atomic_add_unless(&xprt->num_reqs, -1, xprt->min_reqs)) {
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	task->tk_status = 0;
	task->tk_rqstp = req;
	xprt_request_init(task, xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	/* Note: grabbing the xprt_lock_write() ensures that we throttle
	 * new slot allocation if the transport is congested (i.e. when
	 * reconnecting a stream transport or when out of socket write
	 * buffer space).
	 */
	if (xprt_lock_write(xprt, task)) {
		xprt_alloc_slot(xprt, task);
		xprt_release_write(xprt, task);
	}
}
EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);

static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	atomic_set(&xprt->num_reqs, num_prealloc);

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

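/*
 * A transport's setup routine usually embeds struct rpc_xprt at the
 * head of its own per-connection structure and lets xprt_alloc() size
 * the allocation and preallocate the slot table.  A sketch (the "foo"
 * type is illustrative; the slot-table constants are from xprt.h):
 *
 *	struct foo_xprt *foo;
 *	struct rpc_xprt *xprt;
 *
 *	xprt = xprt_alloc(args->net, sizeof(*foo),
 *			  RPC_DEF_SLOT_TABLE, RPC_MAX_SLOT_TABLE);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 *	foo = container_of(xprt, struct foo_xprt, xprt);
 */
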
void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree(xprt);
}
EXPORT_SYMBOL_GPL(xprt_free);

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	if (!xprt_throttle_congested(xprt, task))
		xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	rcu_read_lock();
	xprt = rcu_dereference(task->tk_client->cl_xprt);
	xprt->ops->alloc_slot(xprt, task);
	rcu_read_unlock();
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return (__force __be32)xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst	*req = task->tk_rqstp;

	INIT_LIST_HEAD(&req->rq_list);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid     = xprt_alloc_xid(xprt);
	req->rq_connect_cookie = xprt->connect_cookie - 1;
	req->rq_bytes_sent = 0;
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			rcu_read_lock();
			xprt = rcu_dereference(task->tk_client->cl_xprt);
			if (xprt->snd_task == task)
				xprt_release_write(xprt, task);
			rcu_read_unlock();
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer,
				xprt->last_used + xprt->idle_timeout);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(req->rq_buffer);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt_free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	atomic_set(&xprt->count, 1);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC:       xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		setup_timer(&xprt->timer, xprt_init_autodisconnect,
			    (unsigned long)xprt);
	else
		init_timer(&xprt->timer);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	dprintk("RPC:       created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC:       destroying transport %p\n", xprt);
	del_timer_sync(&xprt->timer);

	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	cancel_work_sync(&xprt->task_cleanup);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (atomic_dec_and_test(&xprt->count))
		xprt_destroy(xprt);
}