/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Referred to sometimes as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap_sync).
 */
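
/* Illustrative verb sequence (a sketch only; the argument names here
 * are placeholders, see the functions below for the real calls):
 *
 *	fmr = ib_alloc_fmr(pd, access_flags, &fmr_attr);	(fmr_op_init_mr)
 *	ib_map_phys_fmr(fmr, page_list, npages, iova);		(fmr_op_map)
 *		... RDMA READ or WRITE by the peer ...
 *	ib_unmap_fmr(&fmr_list);				(fmr_op_unmap_sync)
 *	ib_dealloc_fmr(fmr);					(fmr_op_release_mr)
 */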

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)

/* Access mode of externally registered pages */
enum {
	RPCRDMA_FMR_ACCESS_FLAGS	= IB_ACCESS_REMOTE_WRITE |
					  IB_ACCESS_REMOTE_READ,
};

bool
fmr_is_supported(struct rpcrdma_ia *ia)
{
	if (!ia->ri_device->alloc_fmr) {
		pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
			ia->ri_device->name);
		return false;
	}
	return true;
}

static int
fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
{
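	/* Per-FMR attributes: max_pages caps how many pages a single
	 * mapping may cover, max_maps is how many times the FMR may be
	 * remapped before it must be unmapped, and page_shift selects
	 * the FMR page size (here, the system page size).
	 */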
	static struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};

	mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
				       sizeof(u64), GFP_KERNEL);
	if (!mw->fmr.fm_physaddrs)
		goto out_free;

	mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
			    sizeof(*mw->mw_sg), GFP_KERNEL);
	if (!mw->mw_sg)
		goto out_free;

	sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);

	mw->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
				     &fmr_attr);
	if (IS_ERR(mw->fmr.fm_mr))
		goto out_fmr_err;

	return 0;

out_fmr_err:
	dprintk("RPC:       %s: ib_alloc_fmr returned %ld\n", __func__,
		PTR_ERR(mw->fmr.fm_mr));

out_free:
	kfree(mw->mw_sg);
	kfree(mw->fmr.fm_physaddrs);
	return -ENOMEM;
}

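/* Unmap a single FMR. ib_unmap_fmr() takes a list of FMRs so that
 * callers can batch invalidations; here the MW's FMR is placed on a
 * temporary list by itself.
 */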
static int
__fmr_unmap(struct rpcrdma_mw *mw)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mw->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del_init(&mw->fmr.fm_mr->list);
	return rc;
}

static void
fmr_op_release_mr(struct rpcrdma_mw *r)
{
	LIST_HEAD(unmap_list);
	int rc;

	kfree(r->fmr.fm_physaddrs);
	kfree(r->mw_sg);

	/* In case this one was left mapped, try to unmap it
	 * to prevent dealloc_fmr from failing with EBUSY
	 */
	rc = __fmr_unmap(r);
	if (rc)
		pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
		       r, rc);

	rc = ib_dealloc_fmr(r->fmr.fm_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
		       r, rc);

	kfree(r);
}

/* Reset of a single FMR.
 */
static void
fmr_op_recover_mr(struct rpcrdma_mw *mw)
{
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	int rc;

	/* ORDER: invalidate first */
	rc = __fmr_unmap(mw);

	/* ORDER: then DMA unmap */
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (rc)
		goto out_release;

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mw);
	r_xprt->rx_stats.mrs_orphaned++;

	spin_lock(&r_xprt->rx_buf.rb_mwlock);
	list_del(&mw->mw_all);
	spin_unlock(&r_xprt->rx_buf.rb_mwlock);

	fmr_op_release_mr(mw);
}

static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
						      RPCRDMA_MAX_DATA_SEGS /
						      RPCRDMA_MAX_FMR_SGES));
	return 0;
}

/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}
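
/* With the common 4KB page size, the RPCRDMA_MAX_FMR_SGES limit above
 * works out to 256KB of payload per chunk segment.
 */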

/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mw *mw;
	u64 *dma_pages;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	if (mw)
		rpcrdma_defer_mr_recovery(mw);
	mw = rpcrdma_get_mw(r_xprt);
	if (!mw)
		return -ENOBUFS;

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes: stop coalescing once the next segment
		 * does not start on a page boundary, or the current one
		 * does not end on one.
		 */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_nents = i;
	mw->mw_dir = rpcrdma_data_dir(writing);
	if (i == 0)
		goto out_dmamap_err;

	if (!ib_dma_map_sg(r_xprt->rx_ia.ri_device,
			   mw->mw_sg, mw->mw_nents, mw->mw_dir))
		goto out_dmamap_err;

	for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
		dma_pages[i] = sg_dma_address(&mw->mw_sg[i]);
	rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
			     dma_pages[0]);
	if (rc)
		goto out_maperr;

	seg1->rl_mw = mw;
	seg1->mr_rkey = mw->fmr.fm_mr->rkey;
	seg1->mr_base = dma_pages[0] + pageoff;
	seg1->mr_nsegs = mw->mw_nents;
	seg1->mr_len = len;
	return mw->mw_nents;

out_dmamap_err:
	pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
	       mw->mw_sg, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return -EIO;

out_maperr:
	pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
	       len, (unsigned long long)dma_pages[0],
	       pageoff, mw->mw_nents, rc);
	rpcrdma_defer_mr_recovery(mw);
	return -EIO;
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct rpcrdma_mr_seg *seg;
	unsigned int i, nchunks;
	struct rpcrdma_mw *mw;
	LIST_HEAD(unmap_list);
	int rc;

	dprintk("RPC:       %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * ib_unmap_fmr() is slow, so use a single call instead
	 * of one call per mapped FMR.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);

		i += seg->mr_nsegs;
	}
	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		goto out_reset;

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		list_del_init(&mw->fmr.fm_mr->list);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
		seg->rl_mw = NULL;
	}

	req->rl_nchunks = 0;
	return;

out_reset:
	pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);

	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		list_del_init(&mw->fmr.fm_mr->list);
		fmr_op_recover_mr(mw);

		i += seg->mr_nsegs;
	}
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		  bool sync)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int i;

	for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		if (sync)
			fmr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
		seg->rl_mw = NULL;
	}
}

const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map				= fmr_op_map,
	.ro_unmap_sync			= fmr_op_unmap_sync,
	.ro_unmap_safe			= fmr_op_unmap_safe,
	.ro_recover_mr			= fmr_op_recover_mr,
	.ro_open			= fmr_op_open,
	.ro_maxpages			= fmr_op_maxpages,
	.ro_init_mr			= fmr_op_init_mr,
	.ro_release_mr			= fmr_op_release_mr,
	.ro_displayname			= "fmr",
};