/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SUNRPC_XPRT_RDMA_H
#define _LINUX_SUNRPC_XPRT_RDMA_H

#include <linux/wait.h> 		/* wait_queue_head_t, etc */
#include <linux/spinlock.h> 		/* spinlock_t, etc */
#include <linux/atomic.h>			/* atomic_t, etc */
#include <linux/workqueue.h>		/* struct work_struct */

#include <rdma/rdma_cm.h>		/* RDMA connection api */
#include <rdma/ib_verbs.h>		/* RDMA verbs api */

#include <linux/sunrpc/clnt.h> 		/* rpc_xprt */
#include <linux/sunrpc/rpc_rdma.h> 	/* RPC/RDMA protocol */
#include <linux/sunrpc/xprtrdma.h> 	/* xprt parameters */

#define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
#define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */

#define RPCRDMA_BIND_TO		(60U * HZ)
#define RPCRDMA_INIT_REEST_TO	(5U * HZ)
#define RPCRDMA_MAX_REEST_TO	(30U * HZ)
#define RPCRDMA_IDLE_DISC_TO	(5U * 60 * HZ)

/*
 * Interface Adapter -- one per transport instance
 */
struct rpcrdma_ia {
	const struct rpcrdma_memreg_ops	*ri_ops;
	struct ib_device	*ri_device;
	struct rdma_cm_id 	*ri_id;
	struct ib_pd		*ri_pd;
	struct completion	ri_done;
	int			ri_async_rc;
	unsigned int		ri_max_segs;
	unsigned int		ri_max_frmr_depth;
	unsigned int		ri_max_inline_write;
	unsigned int		ri_max_inline_read;
	unsigned int		ri_max_send_sges;
	bool			ri_reminv_expected;
	bool			ri_implicit_roundup;
	enum ib_mr_type		ri_mrtype;
	struct ib_qp_attr	ri_qp_attr;
	struct ib_qp_init_attr	ri_qp_init_attr;
};

/*
 * RDMA Endpoint -- one per transport instance
 */

struct rpcrdma_ep {
	atomic_t		rep_cqcount;
	int			rep_cqinit;
	int			rep_connected;
	struct ib_qp_init_attr	rep_attr;
	wait_queue_head_t 	rep_connect_wait;
	struct rpcrdma_connect_private	rep_cm_private;
	struct rdma_conn_param	rep_remote_cma;
	struct sockaddr_storage	rep_remote_addr;
	struct delayed_work	rep_connect_worker;
};

static inline void
rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count)
{
	atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count);
}

/* To update send queue accounting, provider must take a
 * send completion every now and then.
 */
static inline void
rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr)
{
	send_wr->send_flags = 0;
	if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) {
		rpcrdma_init_cqcount(ep, 0);
		send_wr->send_flags = IB_SEND_SIGNALED;
	}
}
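
/* Illustrative sketch only (not part of this header): a send path
 * might apply the completion-signaling policy above just before
 * posting a Work Request.  The wrapper name is hypothetical.
 */
static inline int
rpcrdma_example_post_send(struct rpcrdma_ep *ep, struct ib_qp *qp,
			  struct ib_send_wr *send_wr)
{
	struct ib_send_wr *bad_wr;

	/* Request a send completion only every rep_cqinit posts. */
	rpcrdma_set_signaled(ep, send_wr);
	return ib_post_send(qp, send_wr, &bad_wr);
}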

/* Pre-allocate extra Work Requests for handling backward receives
 * and sends. This is a fixed value because the Work Queues are
 * allocated when the forward channel is set up.
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
#define RPCRDMA_BACKWARD_WRS		(8)
#else
#define RPCRDMA_BACKWARD_WRS		(0)
#endif

/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
 *
 * The below structure appears at the front of a large region of kmalloc'd
 * memory, which always starts on a good alignment boundary.
 */

struct rpcrdma_regbuf {
	struct ib_sge		rg_iov;
	struct ib_device	*rg_device;
	enum dma_data_direction	rg_direction;
	__be32			rg_base[0] __attribute__ ((aligned(256)));
};

static inline u64
rdmab_addr(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.addr;
}

static inline u32
rdmab_length(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.length;
}

static inline u32
rdmab_lkey(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.lkey;
}

static inline struct rpcrdma_msg *
rdmab_to_msg(struct rpcrdma_regbuf *rb)
{
	return (struct rpcrdma_msg *)rb->rg_base;
}

#define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)

/* To ensure a transport can always make forward progress,
 * the number of RDMA segments allowed in header chunk lists
 * is capped at 8. This prevents less-capable devices and
 * memory registrations from overrunning the Send buffer
 * while building chunk lists.
 *
 * Elements of the Read list take up more room than the
 * Write list or Reply chunk. 8 read segments means the Read
 * list (or Write list or Reply chunk) cannot consume more
 * than
 *
 * ((8 + 2) * read segment size) + 1 XDR words, or 244 bytes.
 *
 * And the fixed part of the header is another 24 bytes.
 *
 * The smallest inline threshold is 1024 bytes, ensuring that
 * at least 750 bytes are available for RPC messages.
 */
enum {
	RPCRDMA_MAX_HDR_SEGS = 8,
	RPCRDMA_HDRBUF_SIZE = 256,
};
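
/* Worked example of the arithmetic above (a sketch, assuming each
 * read segment encodes as 6 XDR words):
 *
 *   (8 + 2) * 6 + 1 = 61 XDR words = 244 bytes of chunk lists
 *   244 + 24 = 268 bytes of RPC-over-RDMA header
 *   1024 - 268 = 756 bytes left for the RPC message
 */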

/*
 * struct rpcrdma_rep -- this structure encapsulates state required to
 * receive and complete a reply, asynchronously. It needs several pieces of
 * state:
 *   o recv buffer (posted to provider)
 *   o ib_sge (also donated to provider)
 *   o status of reply (length, success or not)
 *   o bookkeeping state used by the reply handler (list, etc)
 *
 * These are allocated during initialization, per-transport instance.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 */

struct rpcrdma_rep {
	struct ib_cqe		rr_cqe;
	unsigned int		rr_len;
	int			rr_wc_flags;
	u32			rr_inv_rkey;
	struct ib_device	*rr_device;
	struct rpcrdma_xprt	*rr_rxprt;
	struct work_struct	rr_work;
	struct list_head	rr_list;
	struct ib_recv_wr	rr_recv_wr;
	struct rpcrdma_regbuf	*rr_rdmabuf;
};
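
/* Illustrative sketch only: a receive Work Request typically points at
 * the rep's registered buffer, roughly like this (hypothetical helper,
 * not the upstream posting code).
 */
static inline void
rpcrdma_example_init_recv_wr(struct rpcrdma_rep *rep)
{
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
}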

#define RPCRDMA_BAD_LEN		(~0U)

/*
 * struct rpcrdma_mw - external memory region metadata
 *
 * An external memory region is any buffer or page that is registered
 * on the fly (ie, not pre-registered).
 *
 * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. During
 * call_allocate, rpcrdma_buffer_get() assigns one to each segment in
 * an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
 * track of registration metadata while each RPC is pending.
 * rpcrdma_deregister_external() uses this metadata to unmap and
 * release these resources when an RPC is complete.
 */
enum rpcrdma_frmr_state {
	FRMR_IS_INVALID,	/* ready to be used */
	FRMR_IS_VALID,		/* in use */
	FRMR_FLUSHED_FR,	/* flushed FASTREG WR */
	FRMR_FLUSHED_LI,	/* flushed LOCALINV WR */
};

struct rpcrdma_frmr {
	struct ib_mr			*fr_mr;
	struct ib_cqe			fr_cqe;
	enum rpcrdma_frmr_state		fr_state;
	struct completion		fr_linv_done;
	union {
		struct ib_reg_wr	fr_regwr;
		struct ib_send_wr	fr_invwr;
	};
};

struct rpcrdma_fmr {
	struct ib_fmr		*fm_mr;
	u64			*fm_physaddrs;
};

struct rpcrdma_mw {
	struct list_head	mw_list;
	struct scatterlist	*mw_sg;
	int			mw_nents;
	enum dma_data_direction	mw_dir;
	union {
		struct rpcrdma_fmr	fmr;
		struct rpcrdma_frmr	frmr;
	};
	struct rpcrdma_xprt	*mw_xprt;
	u32			mw_handle;
	u32			mw_length;
	u64			mw_offset;
	struct list_head	mw_all;
};

/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 *
 * It includes pre-registered buffer memory for send AND recv.
 * The recv buffer, however, is not owned by this structure, and
 * is "donated" to the hardware when a recv is posted. When a
 * reply is handled, the recv buffer used is given back to the
 * struct rpcrdma_req associated with the request.
 *
 * In addition to the basic memory, this structure includes an array
 * of iovs for send operations. The reason is that the iovs passed to
 * ib_post_{send,recv} must not be modified until the work request
 * completes.
 */

/* Maximum number of page-sized "segments" per chunk list to be
 * registered or invalidated. Must handle a Reply chunk:
 */
enum {
	RPCRDMA_MAX_IOV_SEGS	= 3,
	RPCRDMA_MAX_DATA_SEGS	= ((1 * 1024 * 1024) / PAGE_SIZE) + 1,
	RPCRDMA_MAX_SEGS	= RPCRDMA_MAX_DATA_SEGS +
				  RPCRDMA_MAX_IOV_SEGS,
};

struct rpcrdma_mr_seg {		/* chunk descriptors */
	u32		mr_len;		/* length of chunk or segment */
	struct page	*mr_page;	/* owning page, if any */
	char		*mr_offset;	/* kva if no page, else offset */
};

/* The Send SGE array is provisioned to send a maximum size
 * inline request:
 * - RPC-over-RDMA header
 * - xdr_buf head iovec
 * - RPCRDMA_MAX_INLINE bytes, in pages
 * - xdr_buf tail iovec
 *
 * The actual number of array elements consumed by each RPC
 * depends on the device's max_sge limit.
 */
enum {
	RPCRDMA_MIN_SEND_SGES = 3,
	RPCRDMA_MAX_PAGE_SGES = RPCRDMA_MAX_INLINE >> PAGE_SHIFT,
	RPCRDMA_MAX_SEND_SGES = 1 + 1 + RPCRDMA_MAX_PAGE_SGES + 1,
};

struct rpcrdma_buffer;
struct rpcrdma_req {
	struct list_head	rl_free;
	unsigned int		rl_mapped_sges;
	unsigned int		rl_connect_cookie;
	struct rpcrdma_buffer	*rl_buffer;
	struct rpcrdma_rep	*rl_reply;
	struct ib_send_wr	rl_send_wr;
	struct ib_sge		rl_send_sge[RPCRDMA_MAX_SEND_SGES];
	struct rpcrdma_regbuf	*rl_rdmabuf;	/* xprt header */
	struct rpcrdma_regbuf	*rl_sendbuf;	/* rq_snd_buf */
	struct rpcrdma_regbuf	*rl_recvbuf;	/* rq_rcv_buf */

	struct ib_cqe		rl_cqe;
	struct list_head	rl_all;
	bool			rl_backchannel;

	struct list_head	rl_registered;	/* registered segments */
	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
};

static inline void
rpcrdma_set_xprtdata(struct rpc_rqst *rqst, struct rpcrdma_req *req)
{
	rqst->rq_xprtdata = req;
}

static inline struct rpcrdma_req *
rpcr_to_rdmar(struct rpc_rqst *rqst)
{
	return rqst->rq_xprtdata;
}

static inline void
rpcrdma_push_mw(struct rpcrdma_mw *mw, struct list_head *list)
{
	list_add_tail(&mw->mw_list, list);
}

static inline struct rpcrdma_mw *
rpcrdma_pop_mw(struct list_head *list)
{
	struct rpcrdma_mw *mw;

	mw = list_first_entry(list, struct rpcrdma_mw, mw_list);
	list_del(&mw->mw_list);
	return mw;
}
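
/* Illustrative sketch only: a hypothetical marshaling step might take
 * an MW from a free list and track it on the request's rl_registered
 * list until the RPC completes.
 */
static inline void
rpcrdma_example_claim_mw(struct list_head *free_list,
			 struct rpcrdma_req *req)
{
	struct rpcrdma_mw *mw;

	mw = rpcrdma_pop_mw(free_list);
	rpcrdma_push_mw(mw, &req->rl_registered);
}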

/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance
 */
struct rpcrdma_buffer {
	spinlock_t		rb_mwlock;	/* protect rb_mws list */
	struct list_head	rb_mws;
	struct list_head	rb_all;
	char			*rb_pool;

	spinlock_t		rb_lock;	/* protect buf lists */
	int			rb_send_count, rb_recv_count;
	struct list_head	rb_send_bufs;
	struct list_head	rb_recv_bufs;
	u32			rb_max_requests;
	atomic_t		rb_credits;	/* most recent credit grant */

	u32			rb_bc_srv_max_requests;
	spinlock_t		rb_reqslock;	/* protect rb_allreqs */
	struct list_head	rb_allreqs;

	u32			rb_bc_max_requests;

	spinlock_t		rb_recovery_lock; /* protect rb_stale_mrs */
	struct list_head	rb_stale_mrs;
	struct delayed_work	rb_recovery_worker;
	struct delayed_work	rb_refresh_worker;
};
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)

/*
 * Internal structure for transport instance creation. This
 * exists primarily for modularity.
 *
 * This data should be set with mount options
 */
struct rpcrdma_create_data_internal {
	struct sockaddr_storage	addr;	/* RDMA server address */
	unsigned int	max_requests;	/* max requests (slots) in flight */
	unsigned int	rsize;		/* mount rsize - max read hdr+data */
	unsigned int	wsize;		/* mount wsize - max write hdr+data */
	unsigned int	inline_rsize;	/* max non-rdma read data payload */
	unsigned int	inline_wsize;	/* max non-rdma write data payload */
	unsigned int	padding;	/* non-rdma write header padding */
};

/*
 * Statistics for RPCRDMA
 */
struct rpcrdma_stats {
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;

	unsigned long long	total_rdma_request;
	unsigned long long	total_rdma_reply;

	unsigned long long	pullup_copy_count;
	unsigned long long	fixup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
	unsigned long		nomsg_call_count;
	unsigned long		bcall_count;
	unsigned long		mrs_recovered;
	unsigned long		mrs_orphaned;
	unsigned long		mrs_allocated;
	unsigned long		local_inv_needed;
};

/*
 * Per-registration mode operations
 */
struct rpcrdma_xprt;
struct rpcrdma_memreg_ops {
	int		(*ro_map)(struct rpcrdma_xprt *,
				  struct rpcrdma_mr_seg *, int, bool,
				  struct rpcrdma_mw **);
	void		(*ro_unmap_sync)(struct rpcrdma_xprt *,
					 struct rpcrdma_req *);
	void		(*ro_unmap_safe)(struct rpcrdma_xprt *,
					 struct rpcrdma_req *, bool);
	void		(*ro_recover_mr)(struct rpcrdma_mw *);
	int		(*ro_open)(struct rpcrdma_ia *,
				   struct rpcrdma_ep *,
				   struct rpcrdma_create_data_internal *);
	size_t		(*ro_maxpages)(struct rpcrdma_xprt *);
	int		(*ro_init_mr)(struct rpcrdma_ia *,
				      struct rpcrdma_mw *);
	void		(*ro_release_mr)(struct rpcrdma_mw *);
	const char	*ro_displayname;
	const int	ro_send_w_inv_ok;
};

extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops;
extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops;

/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		rx_xprt;
	struct rpcrdma_ia	rx_ia;
	struct rpcrdma_ep	rx_ep;
	struct rpcrdma_buffer	rx_buf;
	struct rpcrdma_create_data_internal rx_data;
	struct delayed_work	rx_connect_worker;
	struct rpcrdma_stats	rx_stats;
};

#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)

/* Setting this to 0 ensures interoperability with early servers.
 * Setting this to 1 enhances certain unaligned read/write performance.
 * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
extern int xprt_rdma_pad_optimize;

/*
 * Interface Adapter calls - xprtrdma/verbs.c
 */
int rpcrdma_ia_open(struct rpcrdma_xprt *, struct sockaddr *, int);
void rpcrdma_ia_close(struct rpcrdma_ia *);
bool frwr_is_supported(struct rpcrdma_ia *);
bool fmr_is_supported(struct rpcrdma_ia *);

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
void rpcrdma_conn_func(struct rpcrdma_ep *ep);
void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);

int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_req *);
int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_rep *);

/*
 * Buffer calls - xprtrdma/verbs.c
 */
struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
struct rpcrdma_rep *rpcrdma_create_rep(struct rpcrdma_xprt *);
void rpcrdma_destroy_req(struct rpcrdma_req *);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);

struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);

void rpcrdma_defer_mr_recovery(struct rpcrdma_mw *);

struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(size_t, enum dma_data_direction,
					    gfp_t);
bool __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *, struct rpcrdma_regbuf *);
void rpcrdma_free_regbuf(struct rpcrdma_regbuf *);

static inline bool
rpcrdma_regbuf_is_mapped(struct rpcrdma_regbuf *rb)
{
	return rb->rg_device != NULL;
}

static inline bool
rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	if (likely(rpcrdma_regbuf_is_mapped(rb)))
		return true;
	return __rpcrdma_dma_map_regbuf(ia, rb);
}
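
/* Illustrative sketch only: callers generally make sure a regbuf is
 * DMA mapped before handing its coordinates to the provider in an
 * ib_sge.  The helper name is hypothetical.
 */
static inline bool
rpcrdma_example_fill_sge(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb,
			 struct ib_sge *sge, u32 len)
{
	if (!rpcrdma_dma_map_regbuf(ia, rb))
		return false;
	sge->addr = rdmab_addr(rb);
	sge->length = len;
	sge->lkey = rdmab_lkey(rb);
	return true;
}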

int rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *, unsigned int);

int rpcrdma_alloc_wq(void);
void rpcrdma_destroy_wq(void);

/*
 * Wrappers for chunk registration, shared by read/write chunk code.
 */

static inline enum dma_data_direction
rpcrdma_data_dir(bool writing)
{
	return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

bool rpcrdma_prepare_send_sges(struct rpcrdma_ia *, struct rpcrdma_req *,
			       u32, struct xdr_buf *, enum rpcrdma_chunktype);
void rpcrdma_unmap_sges(struct rpcrdma_ia *, struct rpcrdma_req *);
int rpcrdma_marshal_req(struct rpc_rqst *);
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
void rpcrdma_reply_handler(struct work_struct *work);

/* RPC/RDMA module init - xprtrdma/transport.c
 */
extern unsigned int xprt_rdma_max_inline_read;
void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
void rpcrdma_connect_worker(struct work_struct *work);
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
int xprt_rdma_init(void);
void xprt_rdma_cleanup(void);

/* Backchannel calls - xprtrdma/backchannel.c
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
int xprt_rdma_bc_up(struct svc_serv *, struct net *);
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
int rpcrdma_bc_marshal_reply(struct rpc_rqst *);
void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

extern struct xprt_class xprt_rdma_bc;

#endif				/* _LINUX_SUNRPC_XPRT_RDMA_H */