// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
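
/*
 * Illustrative sketch (not part of the original source): at a high level,
 * the steps described above correspond to calls that the RPC client state
 * machine in net/sunrpc/clnt.c makes into this file for a single request:
 *
 *	xprt_reserve(task);			- grab a request slot, or sleep
 *						  on the backlog queue
 *	xprt_request_enqueue_transmit(task);	- queue the encoded message
 *	xprt_transmit(task);			- send it on the transport
 *	xprt_request_wait_receive(task);	- sleep until reply or timeout
 *						  ... then the slot is released
 */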

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"
#include "sysfs.h"
#include "fail.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	 xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	 xprt_destroy(struct rpc_xprt *xprt);
static void	 xprt_request_init(struct rpc_task *task);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);
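
/*
 * Illustrative sketch (hypothetical module, not part of this file): a
 * loadable transport typically registers its xprt_class from module_init()
 * and unregisters it on module exit, e.g.:
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= EXAMPLE_TRANSPORT_IDENT,	(a unique identifier)
 *		.setup	= example_setup,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */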

static void
xprt_class_release(const struct xprt_class *t)
{
	module_put(t->owner);
}

static const struct xprt_class *
xprt_class_find_by_ident_locked(int ident)
{
	const struct xprt_class *t;

	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident != ident)
			continue;
		if (!try_module_get(t->owner))
			continue;
		return t;
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_ident(int ident)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_ident_locked(ident);
	spin_unlock(&xprt_list_lock);
	return t;
}

static const struct xprt_class *
xprt_class_find_by_netid_locked(const char *netid)
{
	const struct xprt_class *t;
	unsigned int i;

	list_for_each_entry(t, &xprt_list, list) {
		for (i = 0; t->netid[i][0] != '\0'; i++) {
			if (strcmp(t->netid[i], netid) != 0)
				continue;
			if (!try_module_get(t->owner))
				continue;
			return t;
		}
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_netid(const char *netid)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_netid_locked(netid);
	if (!t) {
		spin_unlock(&xprt_list_lock);
		request_module("rpc%s", netid);
		spin_lock(&xprt_list_lock);
		t = xprt_class_find_by_netid_locked(netid);
	}
	spin_unlock(&xprt_list_lock);
	return t;
}

/**
 * xprt_find_transport_ident - convert a netid into a transport identifier
 * @netid: transport to load
 *
 * Returns:
 * > 0:		transport identifier
 * -ENOENT:	transport module not available
 */
int xprt_find_transport_ident(const char *netid)
{
	const struct xprt_class *t;
	int ret;

	t = xprt_class_find_by_netid(netid);
	if (!t)
		return -ENOENT;
	ret = t->ident;
	xprt_class_release(t);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_find_transport_ident);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state))
		clear_bit_unlock(XPRT_LOCKED, &xprt->state);
	else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/*
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
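
/*
 * Worked example (illustrative, assuming RPC_CWNDSCALE is 256 as defined
 * in include/linux/sunrpc/xprt.h): with cwnd == 1024, i.e. four requests'
 * worth of credits, a successful reply grows the window by
 * (256 * 256 + 512) / 1024 = 64, one quarter of a credit, while an
 * -ETIMEDOUT result halves it to 512.
 */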

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC:       write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_reset_minortimeo(struct rpc_rqst *req)
{
	req->rq_minortimeo += req->rq_timeout;
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
	req->rq_minortimeo = time_init + req->rq_timeout;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (time_before(jiffies, req->rq_minortimeo))
			return status;
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}
	xprt_reset_minortimeo(req);

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	trace_xprt_disconnect_auto(xprt);
	xprt->connect_cookie++;
	smp_mb__before_atomic();
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_done(xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_schedule_autoclose_locked - Try to schedule an autoclose RPC call
 * @xprt: transport to disconnect
 */
static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state))
		return;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
		rpc_wake_up_queued_task_set_status(&xprt->pending,
						   xprt->snd_task, -ENOTCONN);
}

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_force(xprt);

	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	xprt_schedule_autoclose_locked(xprt);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	xprt_schedule_autoclose_locked(xprt);
out:
	spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
static void xprt_inject_disconnect(struct rpc_xprt *xprt)
{
	if (!fail_sunrpc.ignore_client_disconnect &&
	    should_fail(&fail_sunrpc.attr, 1))
		xprt->ops->inject_disconnect(xprt);
}
#else
static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
{
}
#endif

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_lock_connect);

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}
EXPORT_SYMBOL_GPL(xprt_unlock_connect);

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	trace_xprt_connect(xprt);

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		trace_xprt_disconnect_cleanup(xprt);
		xprt->ops->close(xprt);
	}

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
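
/*
 * Illustrative example: starting from a reestablish_timeout of 3 seconds
 * with a max_reconnect_timeout of 300 seconds, successive calls to
 * xprt_reconnect_backoff() double the interval to 6, 12, 24, ... seconds
 * until the cap is reached, and xprt_reconnect_delay() then reports how
 * much of the current interval remains before a reconnect is scheduled.
 */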

enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch(xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;

	xprt_request_prepare(task->tk_rqstp);
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
out:
		atomic_long_inc(&xprt->xmit_queuelen);
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
	atomic_long_dec(&req->rq_xprt->xmit_queuelen);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;

	}
	if (atomic_read(&xprt->swapper))
		/* This will be clear in __rpc_execute */
		current->flags |= PF_MEMALLOC;
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	struct rpc_xprt	*xprt = task->tk_rqstp->rq_xprt;

	xprt_inject_disconnect(xprt);
	xprt_release_write(xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans) {
		task->tk_client->cl_stats->rpcretrans++;
		trace_xprt_retransmit(req);
	}

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt	*xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	for (;;) {
		next = list_first_entry_or_null(&xprt->xmit_queue,
						struct rpc_rqst, rq_xmit);
		if (!next)
			break;
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status < 0) {
			if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				task->tk_status = status;
			break;
		}
		/* Was @task transmitted, and has it received a reply? */
		if (xprt_request_data_received(task) &&
		    !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			break;
		cond_resched_lock(&xprt->queue_lock);
	}
	spin_unlock(&xprt->queue_lock);
}

static void xprt_complete_request_init(struct rpc_task *task)
{
	if (task->tk_rqstp)
		xprt_request_init(task);
}

void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
}
EXPORT_SYMBOL_GPL(xprt_add_backlog);

static bool __xprt_set_rq(struct rpc_task *task, void *data)
{
	struct rpc_rqst *req = data;

	if (task->tk_rqstp == NULL) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		task->tk_rqstp = req;
		return true;
	}
	return false;
}

bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
		clear_bit(XPRT_CONGESTED, &xprt->state);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		xprt_add_backlog(xprt, task);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(*req), rpc_task_gfp_mask());
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC:       dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC:       waiting for request slot\n");
		fallthrough;
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_wake_up_backlog(xprt, req) &&
	    !xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

static DEFINE_IDA(rpc_xprt_ids);

void xprt_cleanup_ids(void)
{
	ida_destroy(&rpc_xprt_ids);
}

static int xprt_alloc_id(struct rpc_xprt *xprt)
{
	int id;

	id = ida_simple_get(&rpc_xprt_ids, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	xprt->id = id;
	return 0;
}

static void xprt_free_id(struct rpc_xprt *xprt)
{
	ida_simple_remove(&rpc_xprt_ids, xprt->id);
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
1796 1797
	struct rpc_rqst *req;
	int i;
1798 1799 1800 1801 1802

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

O
Olga Kornievskaia 已提交
1803
	xprt_alloc_id(xprt);
1804 1805 1806 1807 1808
	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
1809
			goto out_free;
1810 1811
		list_add(&req->rq_list, &xprt->free);
	}
1812 1813 1814 1815 1816
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
1817
	xprt->num_reqs = num_prealloc;
1818 1819 1820 1821

	return xprt;

out_free:
1822
	xprt_free(xprt);
1823 1824 1825 1826 1827
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
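
/*
 * Usage sketch (illustrative only; names and numbers are hypothetical): a
 * transport setup routine typically embeds struct rpc_xprt in its own
 * per-transport structure and sizes the slot table when calling xprt_alloc():
 *
 *	struct example_xprt {
 *		struct rpc_xprt	xprt;
 *		// transport-private state ...
 *	};
 *
 *	static struct rpc_xprt *example_setup(struct xprt_create *args)
 *	{
 *		struct rpc_xprt *xprt;
 *
 *		xprt = xprt_alloc(args->net, sizeof(struct example_xprt),
 *				  16, 128);
 *		if (!xprt)
 *			return ERR_PTR(-ENOMEM);
 *		// fill in xprt->ops, timeouts, addresses, etc.
 *		return xprt;
 *	}
 */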

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	xprt_free_id(xprt);
	rpc_sysfs_xprt_destroy(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}
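
/*
 * Worked example (illustrative): xprt_init_xid() seeds xprt->xid with a
 * random 32-bit value and xprt_alloc_xid() hands out that value and then
 * post-increments it, so if the seed happened to be 0x12345678 the first
 * three requests would carry XIDs 0x12345678, 0x12345679 and 0x1234567a.
 * The XID is the value later used to match replies to their requests.
 */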

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	req->rq_task	= task;
	req->rq_xprt    = xprt;
	req->rq_buffer  = NULL;
	req->rq_xid	= xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);

	trace_xprt_reserve(req);
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt	*xprt;
	struct rpc_rqst	*req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	task->tk_rqstp = NULL;
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt	*xprt;
	const struct xprt_class *t;

	t = xprt_class_find_by_ident(args->ident);
	if (!t) {
		dprintk("RPC: transport (%d) not supported\n", args->ident);
		return ERR_PTR(-EIO);
	}

	xprt = t->setup(args);
	xprt_class_release(t);

	if (IS_ERR(xprt))
		goto out;
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	trace_xprt_create(xprt);
out:
	return xprt;
}
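
/*
 * Usage sketch (illustrative only; the field values are hypothetical):
 * callers fill in a struct xprt_create and hand it to
 * xprt_create_transport().  A minimal example for a TCP transport might
 * look like:
 *
 *	struct xprt_create args = {
 *		.ident		= XPRT_TRANSPORT_TCP,
 *		.net		= net,
 *		.dstaddr	= (struct sockaddr *)&addr,
 *		.addrlen	= sizeof(addr),
 *		.servername	= "server.example.org",
 *	};
 *	struct rpc_xprt *xprt = xprt_create_transport(&args);
 *
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 *
 * The returned transport is reference counted; see xprt_get()/xprt_put()
 * below.
 */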

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	trace_xprt_destroy(xprt);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	/*
	 * xprt_schedule_autodisconnect() can run after XPRT_LOCKED
	 * is cleared.  We use ->transport_lock to ensure the mod_timer()
	 * can only run *before* del_timer_sync(), never after.
	 */
	spin_lock(&xprt->transport_lock);
	del_timer_sync(&xprt->timer);
	spin_unlock(&xprt->transport_lock);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);
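
/*
 * Illustrative usage note: every successful xprt_get() must be balanced by
 * an xprt_put(); dropping the final reference triggers xprt_destroy() via
 * the kref.  A typical (hypothetical) pattern:
 *
 *	struct rpc_xprt *xprt = xprt_get(task->tk_xprt);
 *
 *	if (xprt) {
 *		// ... use the transport ...
 *		xprt_put(xprt);
 *	}
 */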