/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR), sometimes also referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * three states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *		(Or, the LOCAL_INV WR has not completed or flushed yet).
 *
 * STALE:	The MR was being registered or unregistered when the QP
 *		entered ERROR state, and the pending WR was flushed.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * When frwr_op_map encounters STALE and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

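/* Allocate an FRWR MR and the scatterlist used to DMA-map its payload.
 * Called from frwr_op_init once for each rpcrdma_mw on this transport.
 */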
static int
__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, unsigned int depth)
{
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
	if (!r->mw_sg)
		goto out_list_err;

	sg_init_table(r->mw_sg, depth);
	init_completion(&f->fr_linv_done);
	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC:       %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC:       %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

static void
__frwr_release(struct rpcrdma_mw *r)
{
	int rc;

	rc = ib_dereg_mr(r->frmr.fr_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
		       r, rc);
	kfree(r->mw_sg);
}

static int
__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	rc = ib_dereg_mr(f->fr_mr);
	if (rc) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			rc, r);
		return rc;
	}

	f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG,
			       ia->ri_max_frmr_depth);
	if (IS_ERR(f->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(f->fr_mr), r);
		return PTR_ERR(f->fr_mr);
	}

	dprintk("RPC:       %s: recovered FRMR %p\n", __func__, r);
	f->fr_state = FRMR_IS_INVALID;
	return 0;
}

/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc;

	rc = __frwr_reset_mr(ia, mw);
	ib_dma_unmap_sg(ia->ri_device, mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (rc) {
		pr_err("rpcrdma: FRMR reset status %d, %p orphaned\n",
		       rc, mw);
		r_xprt->rx_stats.mrs_orphaned++;
		return;
	}

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
}

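/* Estimate how many Send Queue WRs one RPC can consume for FRWR
 * registration and invalidation, then size the Send Queue and the
 * credit limit so the largest possible RPC always fits.
 */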
static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	int depth, delta;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      ia->ri_device->attrs.max_fast_reg_page_list_len);
	dprintk("RPC:       %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}
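
	/* A worked example (assuming RPCRDMA_MAX_DATA_SEGS is 64): for a
	 * device whose max_fast_reg_page_list_len is 30, delta starts at
	 * 34, the loop above runs twice, and depth becomes 7 + 4 = 11
	 * Work Requests per RPC.
	 */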

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > ia->ri_device->attrs.max_qp_wr) {
		cdata->max_requests = ia->ri_device->attrs.max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
						      RPCRDMA_MAX_DATA_SEGS /
						      ia->ri_max_frmr_depth));
	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
}

static void
__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr,
			    const char *wr)
{
	frmr->fr_state = FRMR_IS_STALE;
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for each polled FastReg WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		__frwr_sendcompletion_flush(wc, frmr, "fastreg");
	}
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		__frwr_sendcompletion_flush(wc, frmr, "localinv");
	}
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for each polled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	cqe = wc->wr_cqe;
	frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
	if (wc->status != IB_WC_SUCCESS)
		__frwr_sendcompletion_flush(wc, frmr, "localinv");
	complete_all(&frmr->fr_linv_done);
}

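/* Fill the transport's rb_mws free list: one set of MRs (head,
 * pagelist, and tail chunks) for each RPC slot.
 */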
static int
frwr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	int i;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / depth, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC:       %s: initializing %d FRMRs\n", __func__, i);

	while (i--) {
		struct rpcrdma_mw *r;
		int rc;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		rc = __frwr_init(r, pd, depth);
		if (rc) {
			kfree(r);
			return rc;
		}

		r->mw_xprt = r_xprt;
		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
	}

	return 0;
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n, dma_nents;
	u8 key;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	do {
		if (mw)
			rpcrdma_defer_mr_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} while (mw->frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->frmr;
	frmr->fr_state = FRMR_IS_VALID;
	mr = frmr->fr_mr;
	reg_wr = &frmr->fr_regwr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;

		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_nents = i;
	mw->mw_dir = rpcrdma_data_dir(writing);

	dma_nents = ib_dma_map_sg(ia->ri_device,
				  mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (!dma_nents)
		goto out_dmamap_err;

	n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mw->mw_nents))
		goto out_mapmr_err;

	dprintk("RPC:       %s: Using frmr %p to map %u segments (%u bytes)\n",
		__func__, mw, mw->mw_nents, mr->length);

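	/* Bump the low-order byte of the rkey so each registration of
	 * this MR presents a fresh rkey to the remote peer.
	 */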
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);

	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	frmr->fr_cqe.done = frwr_wc_fastreg;
	reg_wr->wr.wr_cqe = &frmr->fr_cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	DECR_CQCOUNT(&r_xprt->rx_ep);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	seg1->rl_mw = mw;
	seg1->mr_rkey = mr->rkey;
	seg1->mr_base = mr->iova;
	seg1->mr_nsegs = mw->mw_nents;
	seg1->mr_len = mr->length;

	return mw->mw_nents;

out_dmamap_err:
	pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
	       mw->mw_sg, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return -ENOMEM;

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%u/%u)\n",
	       frmr->fr_mr, n, mw->mw_nents);
	rc = n < 0 ? n : -EIO;
	rpcrdma_defer_mr_recovery(mw);
	return rc;

out_senderr:
	rpcrdma_defer_mr_recovery(mw);
	return rc;
}

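/* Prepare a LOCAL_INV Work Request that will invalidate the MR
 * registered for this segment. The WR is chained and posted by
 * frwr_op_unmap_sync.
 */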
static struct ib_send_wr *
__frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mw *mw = seg->rl_mw;
	struct rpcrdma_frmr *f = &mw->frmr;
	struct ib_send_wr *invalidate_wr;

	f->fr_state = FRMR_IS_INVALID;
	invalidate_wr = &f->fr_invwr;

	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
	f->fr_cqe.done = frwr_wc_localinv;
	invalidate_wr->wr_cqe = &f->fr_cqe;
	invalidate_wr->opcode = IB_WR_LOCAL_INV;
	invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey;

	return invalidate_wr;
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mr_seg *seg;
	unsigned int i, nchunks;
	struct rpcrdma_frmr *f;
	struct rpcrdma_mw *mw;
	int rc;

	dprintk("RPC:       %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	invalidate_wrs = pos = prev = NULL;
	seg = NULL;
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];

		pos = __frwr_prepare_linv_wr(seg);

		if (!invalidate_wrs)
			invalidate_wrs = pos;
		else
			prev->next = pos;
		prev = pos;

		i += seg->mr_nsegs;
	}
	f = &seg->rl_mw->frmr;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	f->fr_invwr.send_flags = IB_SEND_SIGNALED;
	f->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&f->fr_linv_done);
	INIT_CQCOUNT(&r_xprt->rx_ep);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
	if (rc)
		goto reset_mrs;

	wait_for_completion(&f->fr_linv_done);

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
unmap:
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;
		seg->rl_mw = NULL;

		ib_dma_unmap_sg(ia->ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
	}

	req->rl_nchunks = 0;
	return;

reset_mrs:
	pr_warn("%s: ib_post_send failed %i\n", __func__, rc);

	/* Find and reset the MRs in the LOCAL_INV WRs that did not
	 * get posted. This is synchronous, and slow.
	 */
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;
		f = &mw->frmr;

		if (mw->frmr.fr_mr->rkey == bad_wr->ex.invalidate_rkey) {
			__frwr_reset_mr(ia, mw);
			bad_wr = bad_wr->next;
		}

		i += seg->mr_nsegs;
	}
	goto unmap;
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		   bool sync)
{
	struct rpcrdma_mr_seg *seg;
	struct rpcrdma_mw *mw;
	unsigned int i;

	for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
		seg = &req->rl_segments[i];
		mw = seg->rl_mw;

		if (sync)
			frwr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
		seg->rl_mw = NULL;
	}
}

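/* Release every MR on the rb_all list when the transport's buffers
 * are destroyed.
 */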
static void
frwr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		__frwr_release(r);
		kfree(r);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map				= frwr_op_map,
	.ro_unmap_sync			= frwr_op_unmap_sync,
	.ro_unmap_safe			= frwr_op_unmap_safe,
	.ro_recover_mr			= frwr_op_recover_mr,
	.ro_open			= frwr_op_open,
	.ro_maxpages			= frwr_op_maxpages,
	.ro_init			= frwr_op_init,
	.ro_destroy			= frwr_op_destroy,
	.ro_displayname			= "frwr",
};