/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SUNRPC_XPRT_RDMA_H
#define _LINUX_SUNRPC_XPRT_RDMA_H

#include <linux/wait.h> 		/* wait_queue_head_t, etc */
#include <linux/spinlock.h> 		/* spinlock_t, etc */
#include <linux/atomic.h>			/* atomic_t, etc */
#include <linux/workqueue.h>		/* struct work_struct */

#include <rdma/rdma_cm.h>		/* RDMA connection api */
#include <rdma/ib_verbs.h>		/* RDMA verbs api */

#include <linux/sunrpc/clnt.h> 		/* rpc_xprt */
#include <linux/sunrpc/rpc_rdma.h> 	/* RPC/RDMA protocol */
#include <linux/sunrpc/xprtrdma.h> 	/* xprt parameters */

#define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
#define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */

#define RPCRDMA_BIND_TO		(60U * HZ)
#define RPCRDMA_INIT_REEST_TO	(5U * HZ)
#define RPCRDMA_MAX_REEST_TO	(30U * HZ)
#define RPCRDMA_IDLE_DISC_TO	(5U * 60 * HZ)

/*
 * Interface Adapter -- one per transport instance
 */
struct rpcrdma_ia {
	const struct rpcrdma_memreg_ops	*ri_ops;
	struct ib_device	*ri_device;
	struct rdma_cm_id 	*ri_id;
	struct ib_pd		*ri_pd;
	struct completion	ri_done;
	struct completion	ri_remove_done;
	int			ri_async_rc;
	unsigned int		ri_max_segs;
	unsigned int		ri_max_frmr_depth;
	unsigned int		ri_max_inline_write;
	unsigned int		ri_max_inline_read;
	unsigned int		ri_max_send_sges;
	bool			ri_reminv_expected;
	bool			ri_implicit_roundup;
	enum ib_mr_type		ri_mrtype;
	unsigned long		ri_flags;
	struct ib_qp_attr	ri_qp_attr;
	struct ib_qp_init_attr	ri_qp_init_attr;
};

enum {
	RPCRDMA_IAF_REMOVING = 0,
};

/*
 * RDMA Endpoint -- one per transport instance
 */

struct rpcrdma_ep {
	atomic_t		rep_cqcount;
	int			rep_cqinit;
	int			rep_connected;
	struct ib_qp_init_attr	rep_attr;
	wait_queue_head_t 	rep_connect_wait;
	struct rpcrdma_connect_private	rep_cm_private;
	struct rdma_conn_param	rep_remote_cma;
	struct sockaddr_storage	rep_remote_addr;
	struct delayed_work	rep_connect_worker;
};

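/* Re-arm the counter used by rpcrdma_set_signaled(): after
 * (rep_cqinit - count) more Send WRs, the next Send is signaled
 * and the counter is reset.
 */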
static inline void
rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count)
{
	atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count);
}

/* To update send queue accounting, provider must take a
 * send completion every now and then.
 */
static inline void
rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr)
{
	send_wr->send_flags = 0;
	if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) {
		rpcrdma_init_cqcount(ep, 0);
		send_wr->send_flags = IB_SEND_SIGNALED;
	}
}

/* Pre-allocate extra Work Requests for handling backward receives
 * and sends. This is a fixed value because the Work Queues are
 * allocated when the forward channel is set up.
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
#define RPCRDMA_BACKWARD_WRS		(8)
#else
#define RPCRDMA_BACKWARD_WRS		(0)
#endif

/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
 *
 * The below structure appears at the front of a large region of kmalloc'd
 * memory, which always starts on a good alignment boundary.
 */

struct rpcrdma_regbuf {
	struct ib_sge		rg_iov;
	struct ib_device	*rg_device;
	enum dma_data_direction	rg_direction;
	__be32			rg_base[0] __attribute__ ((aligned(256)));
};

static inline u64
rdmab_addr(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.addr;
}

static inline u32
rdmab_length(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.length;
}

static inline u32
rdmab_lkey(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.lkey;
}

static inline struct rpcrdma_msg *
rdmab_to_msg(struct rpcrdma_regbuf *rb)
{
	return (struct rpcrdma_msg *)rb->rg_base;
}

static inline struct ib_device *
rdmab_device(struct rpcrdma_regbuf *rb)
{
	return rb->rg_device;
}

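/* Default allocation mode for transport buffers: GFP_NOIO avoids
 * recursing into the I/O path during memory reclaim, and
 * __GFP_NOWARN suppresses allocation-failure warnings.
 */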
#define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)

/* To ensure a transport can always make forward progress,
 * the number of RDMA segments allowed in header chunk lists
 * is capped at 8. This prevents less-capable devices and
 * memory registrations from overrunning the Send buffer
 * while building chunk lists.
 *
 * Elements of the Read list take up more room than the
 * Write list or Reply chunk. 8 read segments means the Read
 * list (or Write list or Reply chunk) cannot consume more
 * than
 *
 * ((8 + 2) * read segment size) + 1 XDR words, or 244 bytes.
 *
 * And the fixed part of the header is another 24 bytes.
 *
 * The smallest inline threshold is 1024 bytes, ensuring that
 * at least 750 bytes are available for RPC messages.
 */
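
/* Spelled out, using the 24-byte read segment encoding implied above:
 *
 *	(8 + 2) * 24 + 4	= 244 bytes of chunk lists
 *	244 + 24		= 268 bytes of transport header
 *	1024 - 268		= 756 bytes left for the RPC message
 */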
enum {
	RPCRDMA_MAX_HDR_SEGS = 8,
	RPCRDMA_HDRBUF_SIZE = 256,
};

/*
 * struct rpcrdma_rep -- this structure encapsulates state required to recv
 * and complete a reply, asynchronously. It needs several pieces of
 * state:
 *   o recv buffer (posted to provider)
 *   o ib_sge (also donated to provider)
 *   o status of reply (length, success or not)
 *   o bookkeeping state to get run by reply handler (list, etc)
 *
 * These are allocated during initialization, per-transport instance.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 */

struct rpcrdma_rep {
	struct ib_cqe		rr_cqe;
	unsigned int		rr_len;
	int			rr_wc_flags;
	u32			rr_inv_rkey;
	struct rpcrdma_xprt	*rr_rxprt;
	struct work_struct	rr_work;
	struct list_head	rr_list;
	struct ib_recv_wr	rr_recv_wr;
	struct rpcrdma_regbuf	*rr_rdmabuf;
};

#define RPCRDMA_BAD_LEN		(~0U)

/*
 * struct rpcrdma_mw - external memory region metadata
 *
 * An external memory region is any buffer or page that is registered
 * on the fly (ie, not pre-registered).
 *
 * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. During
 * call_allocate, rpcrdma_buffer_get() assigns one to each segment in
 * an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
 * track of registration metadata while each RPC is pending.
 * rpcrdma_deregister_external() uses this metadata to unmap and
 * release these resources when an RPC is complete.
 */
enum rpcrdma_frmr_state {
	FRMR_IS_INVALID,	/* ready to be used */
	FRMR_IS_VALID,		/* in use */
	FRMR_FLUSHED_FR,	/* flushed FASTREG WR */
	FRMR_FLUSHED_LI,	/* flushed LOCALINV WR */
};

struct rpcrdma_frmr {
	struct ib_mr			*fr_mr;
	struct ib_cqe			fr_cqe;
	enum rpcrdma_frmr_state		fr_state;
	struct completion		fr_linv_done;
	union {
		struct ib_reg_wr	fr_regwr;
		struct ib_send_wr	fr_invwr;
	};
};

struct rpcrdma_fmr {
	struct ib_fmr		*fm_mr;
	u64			*fm_physaddrs;
};

struct rpcrdma_mw {
	struct list_head	mw_list;
	struct scatterlist	*mw_sg;
	int			mw_nents;
	enum dma_data_direction	mw_dir;
	unsigned long		mw_flags;
	union {
		struct rpcrdma_fmr	fmr;
		struct rpcrdma_frmr	frmr;
	};
	struct rpcrdma_xprt	*mw_xprt;
	u32			mw_handle;
	u32			mw_length;
	u64			mw_offset;
	struct list_head	mw_all;
};

/* mw_flags */
enum {
	RPCRDMA_MW_F_RI		= 1,
};

/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 *
 * It includes pre-registered buffer memory for send AND recv.
 * The recv buffer, however, is not owned by this structure, and
 * is "donated" to the hardware when a recv is posted. When a
 * reply is handled, the recv buffer used is given back to the
 * struct rpcrdma_req associated with the request.
 *
 * In addition to the basic memory, this structure includes an array
 * of iovs for send operations. The reason is that the iovs passed to
 * ib_post_{send,recv} must not be modified until the work request
 * completes.
 */

/* Maximum number of page-sized "segments" per chunk list to be
 * registered or invalidated. Must handle a Reply chunk:
 */
enum {
	RPCRDMA_MAX_IOV_SEGS	= 3,
	RPCRDMA_MAX_DATA_SEGS	= ((1 * 1024 * 1024) / PAGE_SIZE) + 1,
	RPCRDMA_MAX_SEGS	= RPCRDMA_MAX_DATA_SEGS +
				  RPCRDMA_MAX_IOV_SEGS,
};

struct rpcrdma_mr_seg {		/* chunk descriptors */
	u32		mr_len;		/* length of chunk or segment */
	struct page	*mr_page;	/* owning page, if any */
	char		*mr_offset;	/* kva if no page, else offset */
};

/* The Send SGE array is provisioned to send a maximum size
 * inline request:
 * - RPC-over-RDMA header
 * - xdr_buf head iovec
 * - RPCRDMA_MAX_INLINE bytes, in pages
 * - xdr_buf tail iovec
 *
 * The actual number of array elements consumed by each RPC
 * depends on the device's max_sge limit.
 */
enum {
	RPCRDMA_MIN_SEND_SGES = 3,
	RPCRDMA_MAX_PAGE_SGES = RPCRDMA_MAX_INLINE >> PAGE_SHIFT,
	RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1,
};

struct rpcrdma_buffer;
struct rpcrdma_req {
	struct list_head	rl_list;
	__be32			rl_xid;
	unsigned int		rl_mapped_sges;
	unsigned int		rl_connect_cookie;
	struct rpcrdma_buffer	*rl_buffer;
	struct rpcrdma_rep	*rl_reply;
	struct ib_send_wr	rl_send_wr;
	struct ib_sge		rl_send_sge[RPCRDMA_MAX_SEND_SGES];
	struct rpcrdma_regbuf	*rl_rdmabuf;	/* xprt header */
	struct rpcrdma_regbuf	*rl_sendbuf;	/* rq_snd_buf */
	struct rpcrdma_regbuf	*rl_recvbuf;	/* rq_rcv_buf */

	struct ib_cqe		rl_cqe;
	struct list_head	rl_all;
	bool			rl_backchannel;

	struct list_head	rl_registered;	/* registered segments */
	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
};

static inline void
rpcrdma_set_xprtdata(struct rpc_rqst *rqst, struct rpcrdma_req *req)
{
	rqst->rq_xprtdata = req;
}

static inline struct rpcrdma_req *
rpcr_to_rdmar(struct rpc_rqst *rqst)
{
	return rqst->rq_xprtdata;
}

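/* List helpers for struct rpcrdma_mw: rpcrdma_push_mw() appends an MW
 * to the tail of a list, and rpcrdma_pop_mw() removes and returns the
 * first entry.
 */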
static inline void
rpcrdma_push_mw(struct rpcrdma_mw *mw, struct list_head *list)
{
	list_add_tail(&mw->mw_list, list);
}

static inline struct rpcrdma_mw *
rpcrdma_pop_mw(struct list_head *list)
{
	struct rpcrdma_mw *mw;

	mw = list_first_entry(list, struct rpcrdma_mw, mw_list);
	list_del(&mw->mw_list);
	return mw;
}

/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance
 */
struct rpcrdma_buffer {
	spinlock_t		rb_mwlock;	/* protect rb_mws list */
	struct list_head	rb_mws;
	struct list_head	rb_all;

	spinlock_t		rb_lock;	/* protect buf lists */
	int			rb_send_count, rb_recv_count;
	struct list_head	rb_send_bufs;
	struct list_head	rb_recv_bufs;
	struct list_head	rb_pending;
	u32			rb_max_requests;
	atomic_t		rb_credits;	/* most recent credit grant */

	u32			rb_bc_srv_max_requests;
	spinlock_t		rb_reqslock;	/* protect rb_allreqs */
	struct list_head	rb_allreqs;

	u32			rb_bc_max_requests;

	spinlock_t		rb_recovery_lock; /* protect rb_stale_mrs */
	struct list_head	rb_stale_mrs;
	struct delayed_work	rb_recovery_worker;
	struct delayed_work	rb_refresh_worker;
};
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)

/*
 * Internal structure for transport instance creation. This
 * exists primarily for modularity.
 *
 * This data should be set with mount options
 */
struct rpcrdma_create_data_internal {
	struct sockaddr_storage	addr;	/* RDMA server address */
	unsigned int	max_requests;	/* max requests (slots) in flight */
	unsigned int	rsize;		/* mount rsize - max read hdr+data */
	unsigned int	wsize;		/* mount wsize - max write hdr+data */
	unsigned int	inline_rsize;	/* max non-rdma read data payload */
	unsigned int	inline_wsize;	/* max non-rdma write data payload */
	unsigned int	padding;	/* non-rdma write header padding */
};

/*
 * Statistics for RPCRDMA
 */
struct rpcrdma_stats {
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;

	unsigned long long	total_rdma_request;
	unsigned long long	total_rdma_reply;

	unsigned long long	pullup_copy_count;
	unsigned long long	fixup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
	unsigned long		nomsg_call_count;
	unsigned long		bcall_count;
	unsigned long		mrs_recovered;
	unsigned long		mrs_orphaned;
	unsigned long		mrs_allocated;
	unsigned long		local_inv_needed;
};

/*
 * Per-registration mode operations
 */
struct rpcrdma_xprt;
struct rpcrdma_memreg_ops {
	int		(*ro_map)(struct rpcrdma_xprt *,
				  struct rpcrdma_mr_seg *, int, bool,
				  struct rpcrdma_mw **);
	void		(*ro_unmap_sync)(struct rpcrdma_xprt *,
					 struct list_head *);
	void		(*ro_unmap_safe)(struct rpcrdma_xprt *,
					 struct rpcrdma_req *, bool);
	void		(*ro_recover_mr)(struct rpcrdma_mw *);
	int		(*ro_open)(struct rpcrdma_ia *,
				   struct rpcrdma_ep *,
				   struct rpcrdma_create_data_internal *);
	size_t		(*ro_maxpages)(struct rpcrdma_xprt *);
	int		(*ro_init_mr)(struct rpcrdma_ia *,
				      struct rpcrdma_mw *);
	void		(*ro_release_mr)(struct rpcrdma_mw *);
	const char	*ro_displayname;
	const int	ro_send_w_inv_ok;
};

extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops;
extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops;

/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		rx_xprt;
	struct rpcrdma_ia	rx_ia;
	struct rpcrdma_ep	rx_ep;
	struct rpcrdma_buffer	rx_buf;
	struct rpcrdma_create_data_internal rx_data;
	struct delayed_work	rx_connect_worker;
	struct rpcrdma_stats	rx_stats;
};

#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)

/* Setting this to 0 ensures interoperability with early servers.
 * Setting this to 1 enhances certain unaligned read/write performance.
 * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
extern int xprt_rdma_pad_optimize;

/* This setting controls the hunt for a supported memory
 * registration strategy.
 */
extern unsigned int xprt_rdma_memreg_strategy;

/*
 * Interface Adapter calls - xprtrdma/verbs.c
 */
int rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr);
void rpcrdma_ia_remove(struct rpcrdma_ia *ia);
void rpcrdma_ia_close(struct rpcrdma_ia *);
bool frwr_is_supported(struct rpcrdma_ia *);
bool fmr_is_supported(struct rpcrdma_ia *);

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
void rpcrdma_conn_func(struct rpcrdma_ep *ep);
void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);

int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_req *);
int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_rep *);

/*
 * Buffer calls - xprtrdma/verbs.c
 */
struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
struct rpcrdma_rep *rpcrdma_create_rep(struct rpcrdma_xprt *);
void rpcrdma_destroy_req(struct rpcrdma_req *);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);

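/* Helpers for tracking requests that are awaiting replies, linked on
 * rb_pending: rpcrdma_insert_req() and rpcrdma_remove_req() take
 * rb_lock themselves, while rpcrdma_lookup_req_locked() expects the
 * caller to already hold it.
 */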
static inline void
rpcrdma_insert_req(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
	spin_lock(&buffers->rb_lock);
	if (list_empty(&req->rl_list))
		list_add_tail(&req->rl_list, &buffers->rb_pending);
	spin_unlock(&buffers->rb_lock);
}

static inline struct rpcrdma_req *
rpcrdma_lookup_req_locked(struct rpcrdma_buffer *buffers, __be32 xid)
{
	struct rpcrdma_req *pos;

	list_for_each_entry(pos, &buffers->rb_pending, rl_list)
		if (pos->rl_xid == xid)
			return pos;
	return NULL;
}

static inline void
rpcrdma_remove_req(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
	spin_lock(&buffers->rb_lock);
	list_del(&req->rl_list);
	spin_unlock(&buffers->rb_lock);
}

struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);

void rpcrdma_defer_mr_recovery(struct rpcrdma_mw *);

struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(size_t, enum dma_data_direction,
					    gfp_t);
bool __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *, struct rpcrdma_regbuf *);
void rpcrdma_free_regbuf(struct rpcrdma_regbuf *);

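/* A regbuf is DMA-mapped lazily: rpcrdma_regbuf_is_mapped() reports
 * whether rg_device has been set, and rpcrdma_dma_map_regbuf() is a
 * fast-path no-op once the buffer is mapped, falling back to
 * __rpcrdma_dma_map_regbuf() otherwise.
 */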
static inline bool
rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
{
	return rb->rg_device != NULL;
}

static inline bool
rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	if (likely(rpcrdma_regbuf_is_mapped(rb)))
		return true;
	return __rpcrdma_dma_map_regbuf(ia, rb);
}

int rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *, unsigned int);

int rpcrdma_alloc_wq(void);
void rpcrdma_destroy_wq(void);

/*
 * Wrappers for chunk registration, shared by read/write chunk code.
 */

static inline enum dma_data_direction
rpcrdma_data_dir(bool writing)
{
	return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

bool rpcrdma_prepare_send_sges(struct rpcrdma_ia *, struct rpcrdma_req *,
			       u32, struct xdr_buf *, enum rpcrdma_chunktype);
void rpcrdma_unmap_sges(struct rpcrdma_ia *, struct rpcrdma_req *);
int rpcrdma_marshal_req(struct rpc_rqst *);
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
void rpcrdma_reply_handler(struct work_struct *work);

/* RPC/RDMA module init - xprtrdma/transport.c
 */
extern unsigned int xprt_rdma_max_inline_read;
void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
void rpcrdma_connect_worker(struct work_struct *work);
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
int xprt_rdma_init(void);
void xprt_rdma_cleanup(void);

/* Backchannel calls - xprtrdma/backchannel.c
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
int xprt_rdma_bc_up(struct svc_serv *, struct net *);
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
int rpcrdma_bc_marshal_reply(struct rpc_rqst *);
void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

extern struct xprt_class xprt_rdma_bc;

#endif				/* _LINUX_SUNRPC_XPRT_RDMA_H */