/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* No-op chunk preparation. All client memory is pre-registered.
 * Sometimes referred to as ALLPHYSICAL mode.
 *
 * Physical registration is simple because all client memory is
 * pre-registered and never deregistered. This mode is good for
 * adapter bring up, but is considered not safe: the server is
 * trusted not to abuse its access to client memory not involved
 * in RDMA I/O.
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static int
physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
		 struct rpcrdma_create_data_internal *cdata)
{
26 27 28 29 30 31 32 33 34 35 36 37 38
	struct ib_mr *mr;

	/* Obtain an rkey to use for RPC data payloads.
	 */
	mr = ib_get_dma_mr(ia->ri_pd,
			   IB_ACCESS_LOCAL_WRITE |
			   IB_ACCESS_REMOTE_WRITE |
			   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(mr)) {
		pr_err("%s: ib_get_dma_mr for failed with %lX\n",
		       __func__, PTR_ERR(mr));
		return -ENOMEM;
	}
39
	ia->ri_dma_mr = mr;
C
Chuck Lever 已提交
40 41 42 43

	rpcrdma_set_max_header_sizes(ia, cdata, min_t(unsigned int,
						      RPCRDMA_MAX_DATA_SEGS,
						      RPCRDMA_MAX_HDR_SEGS));
C
Chuck Lever 已提交
44 45 46
	return 0;
}

/* PHYSICAL memory registration conveys one page per chunk segment.
 *
 * Returns the maximum number of pages this mode can convey in a
 * single RPC, bounded by both the data-segment and header limits.
 */
static size_t
physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS);
}

/* ALLPHYSICAL mode keeps no per-transport registration state,
 * so there is nothing to set up. Always succeeds.
 */
static int
physical_op_init(struct rpcrdma_xprt *r_xprt)
{
	return 0;
}

/* The client's physical memory is already exposed for
 * remote access via RDMA READ or RDMA WRITE.
 *
 * DMA-map a single segment and stamp it with the all-physical
 * MR's rkey so the server can address it directly.
 *
 * Always consumes exactly one segment, so returns 1.
 */
static int
physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
		int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
	seg->mr_rkey = ia->ri_dma_mr->rkey;
	seg->mr_base = seg->mr_dma;
	return 1;
}

/* Unmap a memory region, but leave it registered.
 */
static int
physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
82 83
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

84
	rpcrdma_unmap_one(ia->ri_device, seg);
85 86 87
	return 1;
}

/* DMA unmap all memory regions that were mapped for "req".
 *
 * rl_nchunks counts down to zero as each mapped segment is
 * released, so the request ends with no chunks outstanding.
 */
static void
physical_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	unsigned int seg = 0;

	while (req->rl_nchunks) {
		rpcrdma_unmap_one(device, &req->rl_segments[seg]);
		seg++;
		req->rl_nchunks--;
	}
}

/* ALLPHYSICAL mode allocates no per-buffer registration resources,
 * so there is nothing to tear down here.
 */
static void
physical_op_destroy(struct rpcrdma_buffer *buf)
{
}

const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
106
	.ro_map				= physical_op_map,
107
	.ro_unmap_sync			= physical_op_unmap_sync,
108
	.ro_unmap			= physical_op_unmap,
C
Chuck Lever 已提交
109
	.ro_open			= physical_op_open,
110
	.ro_maxpages			= physical_op_maxpages,
C
Chuck Lever 已提交
111
	.ro_init			= physical_op_init,
112
	.ro_destroy			= physical_op_destroy,
113 114
	.ro_displayname			= "physical",
};