/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR), sometimes also referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap_sync marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */

/* Transport recovery
 *
 * ->ro_map and the transport connect worker cannot run at the same
 * time, but ->ro_unmap_safe can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->ro_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->ro_unmap_safe could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR:	The MR was being registered when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * FLUSHED_LI:	The MR was being invalidated when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */

#include <linux/sunrpc/rpc_rdma.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

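/* frwr_is_supported - Check whether "frwr" registration is usable
 *
 * Returns true only if the device's verbs provide the Memory
 * Management Extensions needed to post FAST_REG and LOCAL_INV
 * Work Requests.
 */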
bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}

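/* Allocate the ib_mr and scatterlist that back one rpcrdma_mw.
 */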
static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	unsigned int depth = ia->ri_max_frmr_depth;
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
	if (!r->mw_sg)
		goto out_list_err;

	sg_init_table(r->mw_sg, depth);
	init_completion(&f->fr_linv_done);
	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC:       %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC:       %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

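/* Destroy an rpcrdma_mw: deregister its ib_mr, then free its
 * scatterlist and the structure itself.
 */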
static void
frwr_op_release_mr(struct rpcrdma_mw *r)
{
	int rc;

	/* Ensure MW is not on any rl_registered list */
	if (!list_empty(&r->mw_list))
		list_del(&r->mw_list);

	rc = ib_dereg_mr(r->frmr.fr_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
		       r, rc);
	kfree(r->mw_sg);
	kfree(r);
}

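/* Deregister the underlying ib_mr and replace it with a fresh
 * one. The replacement MR comes with a new rkey, fencing any
 * stale remote access rights.
 */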
static int
__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	rc = ib_dereg_mr(f->fr_mr);
	if (rc) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			rc, r);
		return rc;
	}

	f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
			       ia->ri_max_frmr_depth);
	if (IS_ERR(f->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(f->fr_mr), r);
		return PTR_ERR(f->fr_mr);
	}

	dprintk("RPC:       %s: recovered FRMR %p\n", __func__, f);
	f->fr_state = FRMR_IS_INVALID;
	return 0;
}

/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
	enum rpcrdma_frmr_state state = mw->frmr.fr_state;
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc;

	rc = __frwr_reset_mr(ia, mw);
	if (state != FRMR_FLUSHED_LI)
		ib_dma_unmap_sg(ia->ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (rc)
		goto out_release;

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FRMR reset failed %d, %p release\n", rc, mw);
	r_xprt->rx_stats.mrs_orphaned++;

	spin_lock(&r_xprt->rx_buf.rb_mwlock);
	list_del(&mw->mw_all);
	spin_unlock(&r_xprt->rx_buf.rb_mwlock);

	frwr_op_release_mr(mw);
}

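/* On transport open, size the Send Queue so that each RPC can
 * post all of the FAST_REG and LOCAL_INV Work Requests it might
 * need, and derive how many chunk segments that allows per RPC.
 */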
static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;
	int depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      attrs->max_fast_reg_page_list_len);
	dprintk("RPC:       %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > attrs->max_qp_wr) {
		cdata->max_requests = attrs->max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frmr_depth);
	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
}

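/* Report a Work Completion error, but stay quiet about the WR
 * flushes that normally follow a transport disconnect.
 */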
static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		frmr->fr_state = FRMR_FLUSHED_FR;
		__frwr_sendcompletion_flush(wc, "fastreg");
	}
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		frmr->fr_state = FRMR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	cqe = wc->wr_cqe;
	frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
	if (wc->status != IB_WC_SUCCESS) {
		frmr->fr_state = FRMR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	complete(&frmr->fr_linv_done);
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing, struct rpcrdma_mw **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n;
	u8 key;

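	/* Acquire an MR that is ready for registration. MRs found
	 * in any other state were broken by a transport disconnect
	 * and are handed to the recovery workqueue.
	 */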
	mw = NULL;
	do {
		if (mw)
			rpcrdma_defer_mr_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return ERR_PTR(-ENOBUFS);
	} while (mw->frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->frmr;
	frmr->fr_state = FRMR_IS_VALID;
	mr = frmr->fr_mr;
	reg_wr = &frmr->fr_regwr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;
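	/* Build a scatterlist out of the chunk segments. Unless the
	 * device can register gappy lists (SG_GAPS), stop at the
	 * first page-alignment discontinuity.
	 */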
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_dir = rpcrdma_data_dir(writing);

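	/* DMA map the scatterlist, then register the mapped pages
	 * with the device.
	 */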
	mw->mw_nents = ib_dma_map_sg(ia->ri_device, mw->mw_sg, i, mw->mw_dir);
	if (!mw->mw_nents)
		goto out_dmamap_err;

	n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mw->mw_nents))
		goto out_mapmr_err;

	dprintk("RPC:       %s: Using frmr %p to map %u segments (%llu bytes)\n",
		__func__, frmr, mw->mw_nents, mr->length);

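	/* Advance the 8-bit key portion of the rkey, so that each
	 * registration of this MR produces a distinct rkey.
	 */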
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);

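	/* Construct the REG_MR Work Request */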
	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	frmr->fr_cqe.done = frwr_wc_fastreg;
	reg_wr->wr.wr_cqe = &frmr->fr_cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

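	/* Registration WRs are typically unsignaled; signal one
	 * occasionally to keep the provider's Send Queue from
	 * overflowing.
	 */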
	rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	mw->mw_handle = mr->rkey;
	mw->mw_length = mr->length;
	mw->mw_offset = mr->iova;

	*out = mw;
	return seg;

out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mw->mw_sg, i);
	frmr->fr_state = FRMR_IS_INVALID;
	rpcrdma_put_mw(r_xprt, mw);
	return ERR_PTR(-EIO);

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
	       frmr->fr_mr, n, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return ERR_PTR(-EIO);

out_senderr:
	pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
	rpcrdma_defer_mr_recovery(mw);
	return ERR_PTR(-ENOTCONN);
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mws is not empty before the call. This
 * function empties the list.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
{
	struct ib_send_wr *first, **prev, *last, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_frmr *f;
	struct rpcrdma_mw *mw;
	int count, rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	f = NULL;
	count = 0;
	prev = &first;
	list_for_each_entry(mw, mws, mw_list) {
		mw->frmr.fr_state = FRMR_IS_INVALID;

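		/* An MR that the server has already invalidated
		 * remotely needs no LOCAL_INV WR of its own.
		 */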
		if (mw->mw_flags & RPCRDMA_MW_F_RI)
			continue;

		f = &mw->frmr;
		dprintk("RPC:       %s: invalidating frmr %p\n",
			__func__, f);

		f->fr_cqe.done = frwr_wc_localinv;
		last = &f->fr_invwr;
		memset(last, 0, sizeof(*last));
		last->wr_cqe = &f->fr_cqe;
		last->opcode = IB_WR_LOCAL_INV;
		last->ex.invalidate_rkey = mw->mw_handle;
		count++;

		*prev = last;
		prev = &last->next;
	}
	if (!f)
		goto unmap;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->send_flags = IB_SEND_SIGNALED;
	f->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&f->fr_linv_done);

	/* Initialize CQ count, since there is always a signaled
	 * WR being posted here.  The new cqcount depends on how
	 * many SQEs are about to be consumed.
	 */
	rpcrdma_init_cqcount(&r_xprt->rx_ep, count);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	r_xprt->rx_stats.local_inv_needed++;
	bad_wr = NULL;
	rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
	if (bad_wr != first)
		wait_for_completion(&f->fr_linv_done);
	if (rc)
		goto reset_mrs;

	/* ORDER: Now DMA unmap all of the MRs, and return
	 * them to the free MW list.
	 */
unmap:
	while (!list_empty(mws)) {
		mw = rpcrdma_pop_mw(mws);
		dprintk("RPC:       %s: DMA unmapping frmr %p\n",
			__func__, &mw->frmr);
		ib_dma_unmap_sg(ia->ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);
	}
	return;

reset_mrs:
	pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);

	/* Find and reset the MRs in the LOCAL_INV WRs that did not
	 * get posted.
	 */
	rpcrdma_init_cqcount(&r_xprt->rx_ep, -count);
	while (bad_wr) {
		f = container_of(bad_wr, struct rpcrdma_frmr,
				 fr_invwr);
		mw = container_of(f, struct rpcrdma_mw, frmr);

		__frwr_reset_mr(ia, mw);

		bad_wr = bad_wr->next;
	}
	goto unmap;
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		   bool sync)
{
	struct rpcrdma_mw *mw;

	while (!list_empty(&req->rl_registered)) {
		mw = rpcrdma_pop_mw(&req->rl_registered);
		if (sync)
			frwr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);
	}
}

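/* Entry points for the "frwr" memory registration mode */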
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map				= frwr_op_map,
	.ro_unmap_sync			= frwr_op_unmap_sync,
	.ro_unmap_safe			= frwr_op_unmap_safe,
	.ro_recover_mr			= frwr_op_recover_mr,
	.ro_open			= frwr_op_open,
	.ro_maxpages			= frwr_op_maxpages,
	.ro_init_mr			= frwr_op_init_mr,
	.ro_release_mr			= frwr_op_release_mr,
	.ro_displayname			= "frwr",
	.ro_send_w_inv_ok		= RPCRDMA_CMP_F_SND_W_INV_OK,
};