/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Sometimes also referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * three states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *		(Or, the LOCAL_INV WR has not completed or flushed yet).
 *
 * STALE:	The MR was being registered or unregistered when the QP
 *		entered ERROR state, and the pending WR was flushed.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * When frwr_op_map encounters STALE and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static struct workqueue_struct *frwr_recovery_wq;

#define FRWR_RECOVERY_WQ_FLAGS		(WQ_UNBOUND | WQ_MEM_RECLAIM)

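/* Allocate the workqueue used for deferred FRMR recovery. */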
int
frwr_alloc_recovery_wq(void)
{
	frwr_recovery_wq = alloc_workqueue("frwr_recovery",
					   FRWR_RECOVERY_WQ_FLAGS, 0);
	return !frwr_recovery_wq ? -ENOMEM : 0;
}

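/* Tear down the FRMR recovery workqueue, if one was allocated. */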
void
frwr_destroy_recovery_wq(void)
{
	struct workqueue_struct *wq;

	if (!frwr_recovery_wq)
		return;

	wq = frwr_recovery_wq;
	frwr_recovery_wq = NULL;
	destroy_workqueue(wq);
}

/* Deferred reset of a single FRMR. Generate a fresh rkey by
 * replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
__frwr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw,
					    frmr.fr_work);
	struct rpcrdma_xprt *r_xprt = r->frmr.fr_xprt;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;

	if (ib_dereg_mr(r->frmr.fr_mr))
		goto out_fail;

	r->frmr.fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(r->frmr.fr_mr))
		goto out_fail;

	dprintk("RPC:       %s: recovered FRMR %p\n", __func__, r);
	r->frmr.fr_state = FRMR_IS_INVALID;
	rpcrdma_put_mw(r_xprt, r);
	return;

out_fail:
	pr_warn("RPC:       %s: FRMR %p unrecovered\n",
		__func__, r);
}

/* A broken MR was discovered in a context that can't sleep.
 * Defer recovery to the recovery worker.
 */
static void
__frwr_queue_recovery(struct rpcrdma_mw *r)
{
	INIT_WORK(&r->frmr.fr_work, __frwr_recovery_worker);
	queue_work(frwr_recovery_wq, &r->frmr.fr_work);
}

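/* Allocate the ib_mr, scatterlist, and LOCAL_INV completion that
 * back a single rpcrdma_mw.
 */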
static int
__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
	    unsigned int depth)
{
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	f->fr_sg = kcalloc(depth, sizeof(*f->fr_sg), GFP_KERNEL);
	if (!f->fr_sg)
		goto out_list_err;

	sg_init_table(f->fr_sg, depth);

	init_completion(&f->fr_linv_done);

	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC:       %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC:       %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

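/* Release the resources attached to an rpcrdma_mw by __frwr_init. */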
static void
__frwr_release(struct rpcrdma_mw *r)
{
	int rc;

	rc = ib_dereg_mr(r->frmr.fr_mr);
	if (rc)
		dprintk("RPC:       %s: ib_dereg_mr status %i\n",
			__func__, rc);
	kfree(r->frmr.fr_sg);
}

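/* Compute the largest FRMR page list depth the device supports, then
 * size the send queue (reducing the number of concurrent RPC requests
 * if necessary) to fit within the device's work request limits.
 */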
static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	int depth, delta;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      ia->ri_device->attrs.max_fast_reg_page_list_len);
	dprintk("RPC:       %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > ia->ri_device->attrs.max_qp_wr) {
		cdata->max_requests = ia->ri_device->attrs.max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
						      RPCRDMA_MAX_DATA_SEGS /
						      ia->ri_max_frmr_depth));
	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
}

static void
__frwr_sendcompletion_flush(struct ib_wc *wc, struct rpcrdma_frmr *frmr,
			    const char *wr)
{
	frmr->fr_state = FRMR_IS_STALE;
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for each polled FastReg WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		__frwr_sendcompletion_flush(wc, frmr, "fastreg");
	}
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		__frwr_sendcompletion_flush(wc, frmr, "localinv");
	}
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for each polled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	cqe = wc->wr_cqe;
	frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
	if (wc->status != IB_WC_SUCCESS)
		__frwr_sendcompletion_flush(wc, frmr, "localinv");
	complete_all(&frmr->fr_linv_done);
}

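/* Allocate an initial pool of FRMRs, enough to cover the maximum
 * number of chunk segments for every RPC slot.
 */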
static int
frwr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	int i;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / depth, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC:       %s: initalizing %d FRMRs\n", __func__, i);

	while (i--) {
		struct rpcrdma_mw *r;
		int rc;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		rc = __frwr_init(r, pd, device, depth);
		if (rc) {
			kfree(r);
			return rc;
		}

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
		r->frmr.fr_xprt = r_xprt;
	}

	return 0;
}

/* Post a FAST_REG Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_device *device = ia->ri_device;
	enum dma_data_direction direction = rpcrdma_data_dir(writing);
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n, dma_nents;
	u8 key;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	do {
		if (mw)
			__frwr_queue_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} while (mw->frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->frmr;
	frmr->fr_state = FRMR_IS_VALID;
	mr = frmr->fr_mr;
	reg_wr = &frmr->fr_regwr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;

	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&frmr->fr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&frmr->fr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;

		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	frmr->fr_nents = i;

	dma_nents = ib_dma_map_sg(device, frmr->fr_sg, frmr->fr_nents, direction);
	if (!dma_nents) {
		pr_err("RPC:       %s: failed to dma map sg %p sg_nents %u\n",
		       __func__, frmr->fr_sg, frmr->fr_nents);
		return -ENOMEM;
	}

	n = ib_map_mr_sg(mr, frmr->fr_sg, frmr->fr_nents, PAGE_SIZE);
	if (unlikely(n != frmr->fr_nents)) {
		pr_err("RPC:       %s: failed to map mr %p (%u/%u)\n",
		       __func__, frmr->fr_mr, n, frmr->fr_nents);
		rc = n < 0 ? n : -EINVAL;
		goto out_senderr;
	}

	dprintk("RPC:       %s: Using frmr %p to map %u segments (%u bytes)\n",
		__func__, mw, frmr->fr_nents, mr->length);

	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);

	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	frmr->fr_cqe.done = frwr_wc_fastreg;
	reg_wr->wr.wr_cqe = &frmr->fr_cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	DECR_CQCOUNT(&r_xprt->rx_ep);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	seg1->mr_dir = direction;
	seg1->rl_mw = mw;
	seg1->mr_rkey = mr->rkey;
	seg1->mr_base = mr->iova;
	seg1->mr_nsegs = frmr->fr_nents;
	seg1->mr_len = mr->length;

	return frmr->fr_nents;

out_senderr:
	dprintk("RPC:       %s: ib_post_send status %i\n", __func__, rc);
	ib_dma_unmap_sg(device, frmr->fr_sg, dma_nents, direction);
	__frwr_queue_recovery(mw);
	return rc;
}

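/* Build a LOCAL_INV Work Request for one registered segment's MR.
 * The caller chains these WRs and posts them with a single
 * ib_post_send call.
 */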
static struct ib_send_wr *
__frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mw *mw = seg->rl_mw;
	struct rpcrdma_frmr *f = &mw->frmr;
	struct ib_send_wr *invalidate_wr;

	f->fr_state = FRMR_IS_INVALID;
	invalidate_wr = &f->fr_invwr;

	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
	f->fr_cqe.done = frwr_wc_localinv;
	invalidate_wr->wr_cqe = &f->fr_cqe;
	invalidate_wr->opcode = IB_WR_LOCAL_INV;
	invalidate_wr->ex.invalidate_rkey = f->fr_mr->rkey;

	return invalidate_wr;
}

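/* DMA unmap a segment's scatterlist, then return its MW to the
 * free list, or queue it for recovery if invalidation failed.
 */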
static void
__frwr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
		 int rc)
{
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	struct rpcrdma_mw *mw = seg->rl_mw;
	struct rpcrdma_frmr *f = &mw->frmr;

	seg->rl_mw = NULL;

	ib_dma_unmap_sg(device, f->fr_sg, f->fr_nents, seg->mr_dir);

	if (!rc)
		rpcrdma_put_mw(r_xprt, mw);
	else
		__frwr_queue_recovery(mw);
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *invalidate_wrs, *pos, *prev, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mr_seg *seg;
	unsigned int i, nchunks;
	struct rpcrdma_frmr *f;
	int rc;

	dprintk("RPC:       %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	invalidate_wrs = pos = prev = NULL;
	seg = NULL;
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];

		pos = __frwr_prepare_linv_wr(seg);

		if (!invalidate_wrs)
			invalidate_wrs = pos;
		else
			prev->next = pos;
		prev = pos;

		i += seg->mr_nsegs;
	}
	f = &seg->rl_mw->frmr;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	f->fr_invwr.send_flags = IB_SEND_SIGNALED;
	f->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&f->fr_linv_done);
	INIT_CQCOUNT(&r_xprt->rx_ep);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
	if (rc) {
		pr_warn("%s: ib_post_send failed %i\n", __func__, rc);
		rdma_disconnect(ia->ri_id);
		goto unmap;
	}

	wait_for_completion(&f->fr_linv_done);

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
unmap:
	for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
		seg = &req->rl_segments[i];

		__frwr_dma_unmap(r_xprt, seg, rc);

		i += seg->mr_nsegs;
		seg->mr_nsegs = 0;
	}

	req->rl_nchunks = 0;
}

/* Post a LOCAL_INV Work Request to prevent further remote access
 * via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mw *mw = seg1->rl_mw;
	struct rpcrdma_frmr *frmr = &mw->frmr;
	struct ib_send_wr *invalidate_wr, *bad_wr;
	int rc, nsegs = seg->mr_nsegs;

	dprintk("RPC:       %s: FRMR %p\n", __func__, mw);

	seg1->rl_mw = NULL;
	frmr->fr_state = FRMR_IS_INVALID;
	invalidate_wr = &mw->frmr.fr_invwr;

	memset(invalidate_wr, 0, sizeof(*invalidate_wr));
	frmr->fr_cqe.done = frwr_wc_localinv;
	invalidate_wr->wr_cqe = &frmr->fr_cqe;
	invalidate_wr->opcode = IB_WR_LOCAL_INV;
	invalidate_wr->ex.invalidate_rkey = frmr->fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	ib_dma_unmap_sg(ia->ri_device, frmr->fr_sg, frmr->fr_nents, seg1->mr_dir);
	read_lock(&ia->ri_qplock);
	rc = ib_post_send(ia->ri_id->qp, invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc)
		goto out_err;

	rpcrdma_put_mw(r_xprt, mw);
	return nsegs;

out_err:
	dprintk("RPC:       %s: ib_post_send status %i\n", __func__, rc);
	__frwr_queue_recovery(mw);
	return nsegs;
}

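/* Release every FRMR on the rb_all list when the transport's
 * buffers are destroyed.
 */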
static void
frwr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	/* Ensure stale MWs for "buf" are no longer in flight */
	flush_workqueue(frwr_recovery_wq);

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		__frwr_release(r);
		kfree(r);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map				= frwr_op_map,
	.ro_unmap_sync			= frwr_op_unmap_sync,
	.ro_unmap			= frwr_op_unmap,
	.ro_open			= frwr_op_open,
	.ro_maxpages			= frwr_op_maxpages,
	.ro_init			= frwr_op_init,
	.ro_destroy			= frwr_op_destroy,
	.ro_displayname			= "frwr",
};