/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SUNRPC_XPRT_RDMA_H
#define _LINUX_SUNRPC_XPRT_RDMA_H

#include <linux/wait.h> 		/* wait_queue_head_t, etc */
#include <linux/spinlock.h> 		/* spinlock_t, etc */
#include <linux/atomic.h>			/* atomic_t, etc */
#include <linux/workqueue.h>		/* struct work_struct */

#include <rdma/rdma_cm.h>		/* RDMA connection api */
#include <rdma/ib_verbs.h>		/* RDMA verbs api */

#include <linux/sunrpc/clnt.h> 		/* rpc_xprt */
#include <linux/sunrpc/rpc_rdma.h> 	/* RPC/RDMA protocol */
#include <linux/sunrpc/xprtrdma.h> 	/* xprt parameters */

#define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
#define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */

#define RPCRDMA_BIND_TO		(60U * HZ)
#define RPCRDMA_INIT_REEST_TO	(5U * HZ)
#define RPCRDMA_MAX_REEST_TO	(30U * HZ)
#define RPCRDMA_IDLE_DISC_TO	(5U * 60 * HZ)

/*
 * Interface Adapter -- one per transport instance
 */
struct rpcrdma_ia {
	const struct rpcrdma_memreg_ops	*ri_ops;
	struct ib_device	*ri_device;
	struct rdma_cm_id 	*ri_id;
	struct ib_pd		*ri_pd;
	struct completion	ri_done;
	int			ri_async_rc;
	unsigned int		ri_max_frmr_depth;
	unsigned int		ri_max_inline_write;
	unsigned int		ri_max_inline_read;
	struct ib_qp_attr	ri_qp_attr;
	struct ib_qp_init_attr	ri_qp_init_attr;
};

/*
 * RDMA Endpoint -- one per transport instance
 */

struct rpcrdma_ep {
	atomic_t		rep_cqcount;
	int			rep_cqinit;
	int			rep_connected;
	struct ib_qp_init_attr	rep_attr;
	wait_queue_head_t 	rep_connect_wait;
	struct rdma_conn_param	rep_remote_cma;
	struct sockaddr_storage	rep_remote_addr;
	struct delayed_work	rep_connect_worker;
};

#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
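
/* For illustration (an editorial sketch, not part of the original
 * header): a send path can use these macros to bound completion
 * traffic by requesting a signaled Send only once every rep_cqinit
 * Work Requests, letting the others complete silently:
 *
 *	if (DECR_CQCOUNT(ep) > 0)
 *		send_wr->send_flags = 0;
 *	else {
 *		INIT_CQCOUNT(ep);
 *		send_wr->send_flags = IB_SEND_SIGNALED;
 *	}
 */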

/* Pre-allocate extra Work Requests for handling backward receives
 * and sends. This is a fixed value because the Work Queues are
 * allocated when the forward channel is set up.
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
#define RPCRDMA_BACKWARD_WRS		(8)
#else
#define RPCRDMA_BACKWARD_WRS		(0)
#endif

/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
 *
 * The below structure appears at the front of a large region of kmalloc'd
 * memory, which always starts on a good alignment boundary.
 */

struct rpcrdma_regbuf {
	size_t			rg_size;
	struct rpcrdma_req	*rg_owner;
	struct ib_sge		rg_iov;
	__be32			rg_base[0] __attribute__ ((aligned(256)));
};

static inline u64
rdmab_addr(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.addr;
}

static inline u32
rdmab_length(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.length;
}

static inline u32
rdmab_lkey(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.lkey;
}

static inline struct rpcrdma_msg *
rdmab_to_msg(struct rpcrdma_regbuf *rb)
{
	return (struct rpcrdma_msg *)rb->rg_base;
}
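
/* Illustrative only: these accessors let a caller populate a separate
 * scatter/gather element from a registered buffer without touching
 * rg_iov directly, for example:
 *
 *	struct ib_sge sge = {
 *		.addr	= rdmab_addr(rb),
 *		.length	= rdmab_length(rb),
 *		.lkey	= rdmab_lkey(rb),
 *	};
 */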

#define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)

/* To ensure a transport can always make forward progress,
 * the number of RDMA segments allowed in header chunk lists
 * is capped at 8. This prevents less-capable devices and
 * memory registrations from overrunning the Send buffer
 * while building chunk lists.
 *
 * Elements of the Read list take up more room than the
 * Write list or Reply chunk. 8 read segments means the Read
 * list (or Write list or Reply chunk) cannot consume more
 * than
 *
 * ((8 + 2) * read segment size) + 1 XDR words, or 244 bytes.
 *
 * And the fixed part of the header is another 24 bytes.
 *
 * The smallest inline threshold is 1024 bytes, ensuring that
 * at least 750 bytes are available for RPC messages.
 */
#define RPCRDMA_MAX_HDR_SEGS	(8)
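
/* Working out the arithmetic above (editorial note): a Read list
 * element is 6 XDR words -- entry discriminator, position, handle,
 * length, and a 64-bit offset -- or 24 bytes. Ten elements plus the
 * final one-word list terminator is (10 * 24) + 4 = 244 bytes, and
 * 1024 - 244 - 24 leaves 756 bytes, hence "at least 750".
 */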

/*
 * struct rpcrdma_rep -- this structure encapsulates state required to recv
 * and complete a reply, asynchronously. It needs several pieces of
 * state:
 *   o recv buffer (posted to provider)
 *   o ib_sge (also donated to provider)
 *   o status of reply (length, success or not)
 *   o bookkeeping state to get run by tasklet (list, etc)
 *
 * These are allocated during initialization, per-transport instance;
 * however, the tasklet execution list itself is global, as it should
 * always be pretty short.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 */

#define RPCRDMA_MAX_DATA_SEGS	((1 * 1024 * 1024) / PAGE_SIZE)

/* data segments + head/tail for Call + head/tail for Reply */
#define RPCRDMA_MAX_SEGS 	(RPCRDMA_MAX_DATA_SEGS + 4)
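
/* With 4KB pages, for example, RPCRDMA_MAX_DATA_SEGS is 256 and
 * RPCRDMA_MAX_SEGS is 260.
 */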

struct rpcrdma_buffer;

struct rpcrdma_rep {
	struct ib_cqe		rr_cqe;
	unsigned int		rr_len;
	struct ib_device	*rr_device;
	struct rpcrdma_xprt	*rr_rxprt;
	struct work_struct	rr_work;
	struct list_head	rr_list;
	struct rpcrdma_regbuf	*rr_rdmabuf;
};

#define RPCRDMA_BAD_LEN		(~0U)

/*
 * struct rpcrdma_mw - external memory region metadata
 *
 * An external memory region is any buffer or page that is registered
 * on the fly (ie, not pre-registered).
 *
 * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. During
 * call_allocate, rpcrdma_buffer_get() assigns one to each segment in
 * an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
 * track of registration metadata while each RPC is pending.
 * rpcrdma_deregister_external() uses this metadata to unmap and
 * release these resources when an RPC is complete.
 */
enum rpcrdma_frmr_state {
	FRMR_IS_INVALID,	/* ready to be used */
	FRMR_IS_VALID,		/* in use */
	FRMR_IS_STALE,		/* failed completion */
};

struct rpcrdma_frmr {
	struct ib_mr			*fr_mr;
	struct ib_cqe			fr_cqe;
	enum rpcrdma_frmr_state		fr_state;
	struct completion		fr_linv_done;
	union {
		struct ib_reg_wr	fr_regwr;
		struct ib_send_wr	fr_invwr;
	};
};

struct rpcrdma_fmr {
	struct ib_fmr		*fm_mr;
	u64			*fm_physaddrs;
};

struct rpcrdma_mw {
	struct list_head	mw_list;
	struct scatterlist	*mw_sg;
	int			mw_nents;
	enum dma_data_direction	mw_dir;
	union {
		struct rpcrdma_fmr	fmr;
		struct rpcrdma_frmr	frmr;
	};
	struct rpcrdma_xprt	*mw_xprt;
	struct list_head	mw_all;
};
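
/* A sketch of the lifecycle described above (illustrative, not quoted
 * from the registration code), using the rpcrdma_get_mw() and
 * rpcrdma_put_mw() helpers declared later in this file:
 *
 *	struct rpcrdma_mw *mw;
 *
 *	mw = rpcrdma_get_mw(r_xprt);
 *	if (!mw)
 *		return -ENOBUFS;
 *	... register the segment and post the RPC ...
 *	rpcrdma_put_mw(r_xprt, mw);
 */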

/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 *
 * It includes pre-registered buffer memory for send AND recv.
 * The recv buffer, however, is not owned by this structure, and
 * is "donated" to the hardware when a recv is posted. When a
 * reply is handled, the recv buffer used is given back to the
 * struct rpcrdma_req associated with the request.
 *
 * In addition to the basic memory, this structure includes an array
 * of iovs for send operations. The reason is that the iovs passed to
 * ib_post_{send,recv} must not be modified until the work request
 * completes.
 *
 * NOTES:
 *   o RPCRDMA_MAX_SEGS is the max number of addressable chunk elements we
 *     marshal. The number needed varies depending on the iov lists that
 *     are passed to us and the memory registration mode we are in.
 */

struct rpcrdma_mr_seg {		/* chunk descriptors */
	struct rpcrdma_mw *rl_mw;	/* registered MR */
	u64		mr_base;	/* registration result */
	u32		mr_rkey;	/* registration result */
	u32		mr_len;		/* length of chunk or segment */
	int		mr_nsegs;	/* number of segments in chunk or 0 */
	struct page	*mr_page;	/* owning page, if any */
	char		*mr_offset;	/* kva if no page, else offset */
};

#define RPCRDMA_MAX_IOVS	(2)

struct rpcrdma_req {
	struct list_head	rl_free;
	unsigned int		rl_niovs;
	unsigned int		rl_nchunks;
	unsigned int		rl_connect_cookie;
	struct rpc_task		*rl_task;
	struct rpcrdma_buffer	*rl_buffer;
	struct rpcrdma_rep	*rl_reply;/* holder for reply buffer */
	struct ib_sge		rl_send_iov[RPCRDMA_MAX_IOVS];
	struct rpcrdma_regbuf	*rl_rdmabuf;
	struct rpcrdma_regbuf	*rl_sendbuf;
	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
	struct rpcrdma_mr_seg	*rl_nextseg;

	struct ib_cqe		rl_cqe;
	struct list_head	rl_all;
	bool			rl_backchannel;
};

static inline struct rpcrdma_req *
rpcr_to_rdmar(struct rpc_rqst *rqst)
{
	void *buffer = rqst->rq_buffer;
	struct rpcrdma_regbuf *rb;

	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base);
	return rb->rg_owner;
}

/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance
 */
struct rpcrdma_buffer {
	spinlock_t		rb_mwlock;	/* protect rb_mws list */
	struct list_head	rb_mws;
	struct list_head	rb_all;
	char			*rb_pool;

	spinlock_t		rb_lock;	/* protect buf lists */
	struct list_head	rb_send_bufs;
	struct list_head	rb_recv_bufs;
	u32			rb_max_requests;
	atomic_t		rb_credits;	/* most recent credit grant */

	u32			rb_bc_srv_max_requests;
	spinlock_t		rb_reqslock;	/* protect rb_allreqs */
	struct list_head	rb_allreqs;

	u32			rb_bc_max_requests;

	spinlock_t		rb_recovery_lock; /* protect rb_stale_mrs */
	struct list_head	rb_stale_mrs;
	struct delayed_work	rb_recovery_worker;
};
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)

/*
 * Internal structure for transport instance creation. This
 * exists primarily for modularity.
 *
 * This data should be set with mount options
 */
struct rpcrdma_create_data_internal {
	struct sockaddr_storage	addr;	/* RDMA server address */
	unsigned int	max_requests;	/* max requests (slots) in flight */
	unsigned int	rsize;		/* mount rsize - max read hdr+data */
	unsigned int	wsize;		/* mount wsize - max write hdr+data */
	unsigned int	inline_rsize;	/* max non-rdma read data payload */
	unsigned int	inline_wsize;	/* max non-rdma write data payload */
	unsigned int	padding;	/* non-rdma write header padding */
};

#define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
	(rpcx_to_rdmad(rq->rq_xprt).inline_rsize)

#define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
	(rpcx_to_rdmad(rq->rq_xprt).inline_wsize)

#define RPCRDMA_INLINE_PAD_VALUE(rq)\
	rpcx_to_rdmad(rq->rq_xprt).padding
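
/* Illustrative use of these thresholds (a sketch, not the marshaling
 * code itself): the marshaler compares an outgoing call against the
 * write threshold to choose between sending the RPC inline and
 * building a chunk list:
 *
 *	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
 *		... send the arguments inline ...
 *	else
 *		... register the payload and marshal chunks ...
 */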

/*
 * Statistics for RPCRDMA
 */
struct rpcrdma_stats {
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;

	unsigned long long	total_rdma_request;
	unsigned long long	total_rdma_reply;

	unsigned long long	pullup_copy_count;
	unsigned long long	fixup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
	unsigned long		nomsg_call_count;
	unsigned long		bcall_count;
	unsigned long		mrs_recovered;
	unsigned long		mrs_orphaned;
};

/*
 * Per-registration mode operations
 */
struct rpcrdma_xprt;
struct rpcrdma_memreg_ops {
	int		(*ro_map)(struct rpcrdma_xprt *,
				  struct rpcrdma_mr_seg *, int, bool);
	void		(*ro_unmap_sync)(struct rpcrdma_xprt *,
					 struct rpcrdma_req *);
	void		(*ro_unmap_safe)(struct rpcrdma_xprt *,
					 struct rpcrdma_req *, bool);
	void		(*ro_recover_mr)(struct rpcrdma_mw *);
	int		(*ro_open)(struct rpcrdma_ia *,
				   struct rpcrdma_ep *,
				   struct rpcrdma_create_data_internal *);
	size_t		(*ro_maxpages)(struct rpcrdma_xprt *);
	int		(*ro_init)(struct rpcrdma_xprt *);
	void		(*ro_destroy)(struct rpcrdma_buffer *);
	const char	*ro_displayname;
};
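
/* Callers dispatch through the IA's ops vector, for instance
 * (an editorial sketch):
 *
 *	n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing);
 *	if (n < 0)
 *		return n;
 */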

extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops;
extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops;

/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		rx_xprt;
	struct rpcrdma_ia	rx_ia;
	struct rpcrdma_ep	rx_ep;
	struct rpcrdma_buffer	rx_buf;
	struct rpcrdma_create_data_internal rx_data;
	struct delayed_work	rx_connect_worker;
	struct rpcrdma_stats	rx_stats;
};

#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)

/* Setting this to 0 ensures interoperability with early servers.
 * Setting this to 1 enhances certain unaligned read/write performance.
 * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
extern int xprt_rdma_pad_optimize;

/*
 * Interface Adapter calls - xprtrdma/verbs.c
 */
int rpcrdma_ia_open(struct rpcrdma_xprt *, struct sockaddr *, int);
void rpcrdma_ia_close(struct rpcrdma_ia *);
bool frwr_is_supported(struct rpcrdma_ia *);
bool fmr_is_supported(struct rpcrdma_ia *);

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);

int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_req *);
int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_rep *);

/*
 * Buffer calls - xprtrdma/verbs.c
 */
struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
struct rpcrdma_rep *rpcrdma_create_rep(struct rpcrdma_xprt *);
void rpcrdma_destroy_req(struct rpcrdma_ia *, struct rpcrdma_req *);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);

struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);

void rpcrdma_defer_mr_recovery(struct rpcrdma_mw *);

struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
					    size_t, gfp_t);
void rpcrdma_free_regbuf(struct rpcrdma_ia *,
			 struct rpcrdma_regbuf *);

int rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *, unsigned int);

int rpcrdma_alloc_wq(void);
void rpcrdma_destroy_wq(void);

/*
 * Wrappers for chunk registration, shared by read/write chunk code.
 */

static inline enum dma_data_direction
rpcrdma_data_dir(bool writing)
{
	return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
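
/* Typical use (a sketch, not quoted from the registration modes): map
 * a segment's scatterlist for the correct DMA direction before posting
 * the registration Work Request:
 *
 *	mw->mw_dir = rpcrdma_data_dir(writing);
 *	dma_nents = ib_dma_map_sg(ia->ri_device, mw->mw_sg,
 *				  mw->mw_nents, mw->mw_dir);
 *	if (!dma_nents)
 *		return -ENOMEM;
 */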

/*
 * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
 */
void rpcrdma_connect_worker(struct work_struct *);
void rpcrdma_conn_func(struct rpcrdma_ep *);
void rpcrdma_reply_handler(struct rpcrdma_rep *);

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */
int rpcrdma_marshal_req(struct rpc_rqst *);
void rpcrdma_set_max_header_sizes(struct rpcrdma_ia *,
				  struct rpcrdma_create_data_internal *,
				  unsigned int);

/* RPC/RDMA module init - xprtrdma/transport.c
 */
extern unsigned int xprt_rdma_max_inline_read;
void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
int xprt_rdma_init(void);
void xprt_rdma_cleanup(void);

/* Backchannel calls - xprtrdma/backchannel.c
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
int xprt_rdma_bc_up(struct svc_serv *, struct net *);
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
int rpcrdma_bc_marshal_reply(struct rpc_rqst *);
void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

extern struct xprt_class xprt_rdma_bc;

#endif				/* _LINUX_SUNRPC_XPRT_RDMA_H */