/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SUNRPC_XPRT_RDMA_H
#define _LINUX_SUNRPC_XPRT_RDMA_H

#include <linux/wait.h> 		/* wait_queue_head_t, etc */
#include <linux/spinlock.h> 		/* spinlock_t, etc */
#include <linux/atomic.h>			/* atomic_t, etc */
#include <linux/workqueue.h>		/* struct work_struct */

#include <rdma/rdma_cm.h>		/* RDMA connection api */
#include <rdma/ib_verbs.h>		/* RDMA verbs api */

#include <linux/sunrpc/clnt.h> 		/* rpc_xprt */
#include <linux/sunrpc/rpc_rdma.h> 	/* RPC/RDMA protocol */
#include <linux/sunrpc/xprtrdma.h> 	/* xprt parameters */

#define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
#define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */

/*
 * Interface Adapter -- one per transport instance
 */
struct rpcrdma_ia {
	const struct rpcrdma_memreg_ops	*ri_ops;
	rwlock_t		ri_qplock;
	struct ib_device	*ri_device;
	struct rdma_cm_id 	*ri_id;
	struct ib_pd		*ri_pd;
	struct ib_mr		*ri_dma_mr;
	struct completion	ri_done;
	int			ri_async_rc;
	unsigned int		ri_max_frmr_depth;
	struct ib_device_attr	ri_devattr;
	struct ib_qp_attr	ri_qp_attr;
	struct ib_qp_init_attr	ri_qp_init_attr;
};

/*
 * RDMA Endpoint -- one per transport instance
 */

struct rpcrdma_ep {
	atomic_t		rep_cqcount;
	int			rep_cqinit;
	int			rep_connected;
	struct ib_qp_init_attr	rep_attr;
	wait_queue_head_t 	rep_connect_wait;
	struct rdma_conn_param	rep_remote_cma;
	struct sockaddr_storage	rep_remote_addr;
	struct delayed_work	rep_connect_worker;
};

/*
 * Force a signaled SEND Work Request every so often,
 * in case the provider needs to do some housekeeping.
 */
#define RPCRDMA_MAX_UNSIGNALED_SENDS	(32)

#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
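
/* A usage sketch (illustrative, not part of the original header): the
 * send path can pair these macros to request a signaled completion
 * roughly once every rep_cqinit posts.
 *
 *	if (DECR_CQCOUNT(ep) > 0)
 *		send_wr.send_flags = 0;
 *	else {
 *		INIT_CQCOUNT(ep);
 *		send_wr.send_flags = IB_SEND_SIGNALED;
 *	}
 */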

/* Force completion handler to ignore the signal
 */
#define RPCRDMA_IGNORE_COMPLETION	(0ULL)

/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
 *
 * The below structure appears at the front of a large region of kmalloc'd
 * memory, which always starts on a good alignment boundary.
 */

struct rpcrdma_regbuf {
	size_t			rg_size;
	struct rpcrdma_req	*rg_owner;
	struct ib_sge		rg_iov;
	__be32			rg_base[0] __attribute__ ((aligned(256)));
};

static inline u64
rdmab_addr(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.addr;
}

static inline u32
rdmab_length(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.length;
}

static inline u32
rdmab_lkey(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.lkey;
}

static inline struct rpcrdma_msg *
rdmab_to_msg(struct rpcrdma_regbuf *rb)
{
	return (struct rpcrdma_msg *)rb->rg_base;
}
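
/* An illustrative use of the accessors above (a sketch; hdrlen is a
 * placeholder name): filling the first element of a request's send
 * iov from its registered header buffer.
 *
 *	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
 *	req->rl_send_iov[0].length = hdrlen;
 *	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
 */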

/*
 * struct rpcrdma_rep -- this structure encapsulates state required to recv
 * and complete a reply, asynchronously. It needs several pieces of
 * state:
 *   o recv buffer (posted to provider)
 *   o ib_sge (also donated to provider)
 *   o status of reply (length, success or not)
 *   o bookkeeping state to get run by tasklet (list, etc)
 *
 * These are allocated during initialization, per-transport instance;
 * however, the tasklet execution list itself is global, as it should
 * always be pretty short.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 */

#define RPCRDMA_MAX_DATA_SEGS	((1 * 1024 * 1024) / PAGE_SIZE)
#define RPCRDMA_MAX_SEGS 	(RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */
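
/* For example (assuming a 4KB PAGE_SIZE, an assumption of this note
 * rather than a requirement): RPCRDMA_MAX_DATA_SEGS is (1MB / 4KB)
 * = 256, so RPCRDMA_MAX_SEGS is 258 once the xdr_buf head and tail
 * are counted.
 */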

struct rpcrdma_buffer;

struct rpcrdma_rep {
	unsigned int		rr_len;
	struct ib_device	*rr_device;
	struct rpcrdma_xprt	*rr_rxprt;
	struct list_head	rr_list;
	struct rpcrdma_regbuf	*rr_rdmabuf;
};

/*
 * struct rpcrdma_mw - external memory region metadata
 *
 * An external memory region is any buffer or page that is registered
 * on the fly (ie, not pre-registered).
 *
 * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. During
 * call_allocate, rpcrdma_buffer_get() assigns one to each segment in
 * an rpcrdma_req. Then rpcrdma_register_external() grabs these to keep
 * track of registration metadata while each RPC is pending.
 * rpcrdma_deregister_external() uses this metadata to unmap and
 * release these resources when an RPC is complete.
 */
enum rpcrdma_frmr_state {
	FRMR_IS_INVALID,	/* ready to be used */
	FRMR_IS_VALID,		/* in use */
	FRMR_IS_STALE,		/* failed completion */
};

struct rpcrdma_frmr {
	struct ib_fast_reg_page_list	*fr_pgl;
	struct ib_mr			*fr_mr;
	enum rpcrdma_frmr_state		fr_state;
	struct work_struct		fr_work;
	struct rpcrdma_xprt		*fr_xprt;
};

struct rpcrdma_fmr {
	struct ib_fmr		*fmr;
	u64			*physaddrs;
};

struct rpcrdma_mw {
	union {
		struct rpcrdma_fmr	fmr;
		struct rpcrdma_frmr	frmr;
	} r;
	void			(*mw_sendcompletion)(struct ib_wc *);
	struct list_head	mw_list;
	struct list_head	mw_all;
};
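
/* A hedged lifecycle sketch (assembled from declarations elsewhere in
 * this header, not a verbatim call site): a registration method takes
 * a free MW off rb_mws before mapping a segment, and returns it once
 * the RPC completes.
 *
 *	struct rpcrdma_mw *mw;
 *
 *	mw = rpcrdma_get_mw(r_xprt);
 *	if (!mw)
 *		return -ENOMEM;
 *	seg->rl_mw = mw;
 *	...
 *	rpcrdma_put_mw(r_xprt, seg->rl_mw);
 */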

/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 *
 * It includes pre-registered buffer memory for send AND recv.
 * The recv buffer, however, is not owned by this structure, and
 * is "donated" to the hardware when a recv is posted. When a
 * reply is handled, the recv buffer used is given back to the
 * struct rpcrdma_req associated with the request.
 *
 * In addition to the basic memory, this structure includes an array
 * of iovs for send operations. The reason is that the iovs passed to
 * ib_post_{send,recv} must not be modified until the work request
 * completes.
 *
 * NOTES:
 *   o RPCRDMA_MAX_SEGS is the max number of addressable chunk elements we
 *     marshal. The number needed varies depending on the iov lists that
 *     are passed to us, the memory registration mode we are in, and if
 *     physical addressing is used, the layout.
 */

struct rpcrdma_mr_seg {		/* chunk descriptors */
	struct rpcrdma_mw *rl_mw;	/* registered MR */
	u64		mr_base;	/* registration result */
	u32		mr_rkey;	/* registration result */
	u32		mr_len;		/* length of chunk or segment */
	int		mr_nsegs;	/* number of segments in chunk or 0 */
	enum dma_data_direction	mr_dir;	/* segment mapping direction */
	dma_addr_t	mr_dma;		/* segment mapping address */
	size_t		mr_dmalen;	/* segment mapping length */
	struct page	*mr_page;	/* owning page, if any */
	char		*mr_offset;	/* kva if no page, else offset */
};

#define RPCRDMA_MAX_IOVS	(2)

struct rpcrdma_req {
	unsigned int		rl_niovs;
	unsigned int		rl_nchunks;
	unsigned int		rl_connect_cookie;
	struct rpcrdma_buffer	*rl_buffer;
	struct rpcrdma_rep	*rl_reply;/* holder for reply buffer */
	struct ib_sge		rl_send_iov[RPCRDMA_MAX_IOVS];
	struct rpcrdma_regbuf	*rl_rdmabuf;
	struct rpcrdma_regbuf	*rl_sendbuf;
	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
};
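
/* Note (an inference from the regbuf layout above, not original text):
 * the buffer handed to the RPC layer is the rg_base[] payload of a
 * struct rpcrdma_regbuf, so container_of() recovers the regbuf and
 * rg_owner recovers the owning rpcrdma_req.
 */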

static inline struct rpcrdma_req *
rpcr_to_rdmar(struct rpc_rqst *rqst)
{
	void *buffer = rqst->rq_buffer;
	struct rpcrdma_regbuf *rb;

	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base);
	return rb->rg_owner;
}

/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance
 */
struct rpcrdma_buffer {
	spinlock_t		rb_mwlock;	/* protect rb_mws list */
	struct list_head	rb_mws;
	struct list_head	rb_all;
	char			*rb_pool;

	spinlock_t		rb_lock;	/* protect buf arrays */
	u32			rb_max_requests;
	int			rb_send_index;
	int			rb_recv_index;
	struct rpcrdma_req	**rb_send_bufs;
	struct rpcrdma_rep	**rb_recv_bufs;
};
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)

/*
 * Internal structure for transport instance creation. This
 * exists primarily for modularity.
 *
 * This data should be set with mount options
 */
struct rpcrdma_create_data_internal {
	struct sockaddr_storage	addr;	/* RDMA server address */
	unsigned int	max_requests;	/* max requests (slots) in flight */
	unsigned int	rsize;		/* mount rsize - max read hdr+data */
	unsigned int	wsize;		/* mount wsize - max write hdr+data */
	unsigned int	inline_rsize;	/* max non-rdma read data payload */
	unsigned int	inline_wsize;	/* max non-rdma write data payload */
	unsigned int	padding;	/* non-rdma write header padding */
};

#define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
	(rpcx_to_rdmad(rq->rq_xprt).inline_rsize)

#define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
	(rpcx_to_rdmad(rq->rq_xprt).inline_wsize)

#define RPCRDMA_INLINE_PAD_VALUE(rq)\
	rpcx_to_rdmad(rq->rq_xprt).padding

/*
 * Statistics for RPCRDMA
 */
struct rpcrdma_stats {
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;

	unsigned long long	total_rdma_request;
	unsigned long long	total_rdma_reply;

	unsigned long long	pullup_copy_count;
	unsigned long long	fixup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
	unsigned long		nomsg_call_count;
};

/*
 * Per-registration mode operations
 */
struct rpcrdma_xprt;
struct rpcrdma_memreg_ops {
	int		(*ro_map)(struct rpcrdma_xprt *,
				  struct rpcrdma_mr_seg *, int, bool);
	int		(*ro_unmap)(struct rpcrdma_xprt *,
				    struct rpcrdma_mr_seg *);
	int		(*ro_open)(struct rpcrdma_ia *,
				   struct rpcrdma_ep *,
				   struct rpcrdma_create_data_internal *);
	size_t		(*ro_maxpages)(struct rpcrdma_xprt *);
	int		(*ro_init)(struct rpcrdma_xprt *);
	void		(*ro_destroy)(struct rpcrdma_buffer *);
	const char	*ro_displayname;
};

extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops;
extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops;
extern const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops;
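
/* A hedged wiring sketch (assumed setup code, not quoted from verbs.c;
 * memreg_strategy is a placeholder name): transport initialization
 * selects one of the ops tables above according to the memory
 * registration strategy in use.
 *
 *	switch (memreg_strategy) {
 *	case RPCRDMA_FRMR:
 *		ia->ri_ops = &rpcrdma_frwr_memreg_ops;
 *		break;
 *	case RPCRDMA_MTHCAFMR:
 *		ia->ri_ops = &rpcrdma_fmr_memreg_ops;
 *		break;
 *	default:
 *		ia->ri_ops = &rpcrdma_physical_memreg_ops;
 *		break;
 *	}
 */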

/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		rx_xprt;
	struct rpcrdma_ia	rx_ia;
	struct rpcrdma_ep	rx_ep;
	struct rpcrdma_buffer	rx_buf;
	struct rpcrdma_create_data_internal rx_data;
	struct delayed_work	rx_connect_worker;
	struct rpcrdma_stats	rx_stats;
};

#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)

/* Setting this to 0 ensures interoperability with early servers.
 * Setting this to 1 enhances certain unaligned read/write performance.
 * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
extern int xprt_rdma_pad_optimize;
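
/* For example (assuming the sysctl registered by the transport module
 * is exposed at this path):
 *
 *	echo 1 > /proc/sys/sunrpc/rdma_pad_optimize
 */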

/*
 * Interface Adapter calls - xprtrdma/verbs.c
 */
int rpcrdma_ia_open(struct rpcrdma_xprt *, struct sockaddr *, int);
void rpcrdma_ia_close(struct rpcrdma_ia *);

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);

int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_req *);
int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_rep *);

/*
 * Buffer calls - xprtrdma/verbs.c
 */
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);

struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);

struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
					    size_t, gfp_t);
void rpcrdma_free_regbuf(struct rpcrdma_ia *,
			 struct rpcrdma_regbuf *);

unsigned int rpcrdma_max_segments(struct rpcrdma_xprt *);

int frwr_alloc_recovery_wq(void);
void frwr_destroy_recovery_wq(void);

/*
 * Wrappers for chunk registration, shared by read/write chunk code.
 */

void rpcrdma_mapping_error(struct rpcrdma_mr_seg *);

static inline enum dma_data_direction
rpcrdma_data_dir(bool writing)
{
	return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
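
/* An illustrative pairing (a sketch, not quoted from any one call
 * site): a registration method can combine these helpers when
 * DMA-mapping a segment for an RDMA transfer.
 *
 *	rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
 */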

static inline void
rpcrdma_map_one(struct ib_device *device, struct rpcrdma_mr_seg *seg,
		enum dma_data_direction direction)
{
	seg->mr_dir = direction;
	seg->mr_dmalen = seg->mr_len;

	if (seg->mr_page)
		seg->mr_dma = ib_dma_map_page(device,
				seg->mr_page, offset_in_page(seg->mr_offset),
				seg->mr_dmalen, seg->mr_dir);
	else
		seg->mr_dma = ib_dma_map_single(device,
				seg->mr_offset,
				seg->mr_dmalen, seg->mr_dir);

	if (ib_dma_mapping_error(device, seg->mr_dma))
		rpcrdma_mapping_error(seg);
}

static inline void
rpcrdma_unmap_one(struct ib_device *device, struct rpcrdma_mr_seg *seg)
{
	if (seg->mr_page)
		ib_dma_unmap_page(device,
				  seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
	else
		ib_dma_unmap_single(device,
				    seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
}

/*
 * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
 */
void rpcrdma_connect_worker(struct work_struct *);
void rpcrdma_conn_func(struct rpcrdma_ep *);
void rpcrdma_reply_handler(struct rpcrdma_rep *);

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */
int rpcrdma_marshal_req(struct rpc_rqst *);

/* RPC/RDMA module init - xprtrdma/transport.c
 */
int xprt_rdma_init(void);
void xprt_rdma_cleanup(void);

/* Temporary NFS request map cache. Created in svc_rdma.c  */
extern struct kmem_cache *svc_rdma_map_cachep;
/* WR context cache. Created in svc_rdma.c  */
extern struct kmem_cache *svc_rdma_ctxt_cachep;
/* Workqueue created in svc_rdma.c */
extern struct workqueue_struct *svc_rdma_wq;

#endif				/* _LINUX_SUNRPC_XPRT_RDMA_H */