/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds_single_path.h"
#include "ib_mr.h"
#include "rds.h"

struct workqueue_struct *rds_ib_mr_wq;
struct rds_ib_dereg_odp_mr {
	struct work_struct work;
	struct ib_mr *mr;
};

static void rds_ib_odp_mr_worker(struct work_struct *work);

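/* Find the rds_ib_device to which @ipaddr is bound and take a reference
 * on it.  Returns NULL if no device claims the address.
 */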
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				refcount_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

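/* Bind @ipaddr to @rds_ibdev.  The entry is published with RCU so that
 * rds_ib_get_device() can walk ipaddr_list without taking the lock.
 */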
static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

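/* Unbind @ipaddr from @rds_ibdev.  The entry is unlinked under the device
 * spinlock and freed only after an RCU grace period.
 */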
static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free)
		kfree_rcu(to_free, rcu);
}

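/* Make sure the IPv4-mapped address in @ipaddr is associated with @rds_ibdev,
 * moving it away from whichever device (if any) currently owns it.
 */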
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
			 struct in6_addr *ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]);
	if (!rds_ibdev_old)
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);

	if (rds_ibdev_old != rds_ibdev) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]);
		rds_ib_dev_put(rds_ibdev_old);
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
	}
	rds_ib_dev_put(rds_ibdev_old);

	return 0;
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	refcount_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

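/* Report the 1M MR pool limits through the rds-info interface. */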
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo->rdma_mr_max = pool_1m->max_items;
	iinfo->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}

#if IS_ENABLED(CONFIG_IPV6)
void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
			 struct rds6_info_rdma_connection *iinfo6)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo6->rdma_mr_max = pool_1m->max_items;
	iinfo6->rdma_mr_size = pool_1m->fmr_attr.max_pages;
}
#endif

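/* Pull an unused MR off the pool's clean_list, if one is available. */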
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long flags;

	spin_lock_irqsave(&pool->clean_lock, flags);
	ret = llist_del_first(&pool->clean_list);
	spin_unlock_irqrestore(&pool->clean_lock, flags);
	if (ret) {
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
	}

	return ibmr;
}

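/* DMA-sync an MR's scatterlist for CPU or device access; ODP MRs need no
 * explicit sync.
 */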
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->odp)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

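/* DMA-unmap an MR's scatterlist and release the pages pinned for it. */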
void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

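/* Tear down an MR and subtract its pinned pages from the pool accounting. */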
void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_mr_pool *pool = ibmr->pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

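/* How many MRs a flush should free: every allocated MR when free_all is set,
 * otherwise none beyond the unmapping itself.
 */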
static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}

/*
 * this takes a list head of mrs and turns it into linked llist nodes
 * of clusters.  Each cluster has linked llist nodes of
 * MR_CLUSTER_SIZE mrs that are ready for reuse.
 */
static void list_to_llist_nodes(struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
			 int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all) {
		unsigned long flags;

		spin_lock_irqsave(&pool->clean_lock, flags);
		llist_append_to_list(&pool->clean_list, &unmap_list);
		spin_unlock_irqrestore(&pool->clean_lock, flags);
	}

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	if (pool->use_fastreg)
		rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);
	else
		rds_ib_unreg_fmr(&unmap_list, &nfreed, &unpinned, free_goal);

	if (!list_empty(&unmap_list)) {
		unsigned long flags;

		list_to_llist_nodes(&unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret) {
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
			clean_nodes = clean_nodes->next;
		}
		/* more than one entry in llist nodes */
		if (clean_nodes) {
			spin_lock_irqsave(&pool->clean_lock, flags);
			llist_add_batch(clean_nodes, clean_tail,
					&pool->clean_list);
			spin_unlock_irqrestore(&pool->clean_lock, flags);
		}
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(dirty_to_clean, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return 0;
}

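/* Get an MR for a new registration: reuse a clean one if possible, otherwise
 * claim a slot in the pool, flushing unused MRs when the pool is depleted.
 * Returns NULL if the pool stays exhausted.
 */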
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	int iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr)
			return ibmr;

		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			if (pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
			break;
		}

		/* We do have some empty MRs. Flush them out. */
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	return NULL;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

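/* Called when a user MR is released.  ODP MRs are deregistered from a work
 * item; pooled MRs go back on the pool lists and may trigger a flush.
 */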
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	if (ibmr->odp) {
		/* An MR created and marked as use_once. We use delayed work,
		 * because there is a chance that we are in interrupt context
		 * and can't call ib_dereg_mr() directly.
		 */
		INIT_DELAYED_WORK(&ibmr->work, rds_ib_odp_mr_worker);
		queue_delayed_work(rds_ib_mr_wq, &ibmr->work, 0);
		return;
	}

	/* Return it to the pool's free list */
	if (rds_ibdev->use_fastreg)
		rds_ib_free_frmr_list(ibmr);
	else
		rds_ib_free_fmr_list(ibmr);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created an MR marked
			 * as use_once and invalidate at the same time.
			 */
			queue_delayed_work(rds_ib_mr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

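/* Flush the MR pools of every IB device known to RDS. */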
void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		if (rds_ibdev->mr_8k_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

		if (rds_ibdev->mr_1m_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

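/* Return the lkey of the ib_mr backing this registration. */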
u32 rds_ib_get_lkey(void *trans_private)
{
	struct rds_ib_mr *ibmr = trans_private;

	return ibmr->u.mr->lkey;
}

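/* Register the pages in @sg as an MR on behalf of @rs.  ODP requests get a
 * dedicated ib_reg_user_mr() registration; everything else is served from
 * the device's FRMR or FMR pools.
 */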
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret,
		    struct rds_connection *conn,
		    u64 start, u64 length, int need_odp)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_connection *ic = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (need_odp == ODP_ZEROBASED || need_odp == ODP_VIRTUAL) {
		u64 virt_addr = need_odp == ODP_ZEROBASED ? 0 : start;
		int access_flags =
			(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC |
			 IB_ACCESS_ON_DEMAND);
		struct ib_sge sge = {};
		struct ib_mr *ib_mr;

		if (!rds_ibdev->odp_capable) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		ib_mr = ib_reg_user_mr(rds_ibdev->pd, start, length, virt_addr,
				       access_flags);

		if (IS_ERR(ib_mr)) {
			rdsdebug("ib_reg_user_mr returned %ld\n",
				 PTR_ERR(ib_mr));
			ret = PTR_ERR(ib_mr);
			goto out;
		}
		if (key_ret)
			*key_ret = ib_mr->rkey;

		ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
		if (!ibmr) {
			ib_dereg_mr(ib_mr);
			ret = -ENOMEM;
			goto out;
		}
		ibmr->u.mr = ib_mr;
		ibmr->odp = 1;

		sge.addr = virt_addr;
		sge.length = length;
		sge.lkey = ib_mr->lkey;

		ib_advise_mr(rds_ibdev->pd,
			     IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
			     IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1);
		return ibmr;
	}

	if (conn)
		ic = conn->c_transport_data;

	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
		ret = -ENODEV;
		goto out;
	}

	if (rds_ibdev->use_fastreg)
		ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
	else
		ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, key_ret);
	if (IS_ERR(ibmr)) {
		ret = PTR_ERR(ibmr);
		pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
	} else {
		return ibmr;
	}

 out:
	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);

	return ERR_PTR(ret);
}

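/* Flush and free an MR pool; warns if MRs are still outstanding. */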
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

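/* Allocate and initialise an 8K or 1M MR pool for @rds_ibdev. */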
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
					     int pool_type)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->pool_type = pool_type;
	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	spin_lock_init(&pool->clean_lock);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	if (pool_type == RDS_IB_MR_1M_POOL) {
		/* +1 allows for unaligned MRs */
		pool->fmr_attr.max_pages = RDS_MR_1M_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_1m_mrs;
	} else {
		/* pool_type == RDS_IB_MR_8K_POOL */
		pool->fmr_attr.max_pages = RDS_MR_8K_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_8k_mrs;
	}

	pool->max_free_pinned = pool->max_items * pool->fmr_attr.max_pages / 4;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;
	pool->use_fastreg = rds_ibdev->use_fastreg;

	return pool;
}

int rds_ib_mr_init(void)
{
	rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd", WQ_MEM_RECLAIM, 0);
	if (!rds_ib_mr_wq)
		return -ENOMEM;
	return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed.  As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
	destroy_workqueue(rds_ib_mr_wq);
}

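/* Work handler that deregisters and frees an ODP MR, scheduled from
 * rds_ib_free_mr().
 */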
static void rds_ib_odp_mr_worker(struct work_struct *work)
{
	struct rds_ib_mr *ibmr;

	ibmr = container_of(work, struct rds_ib_mr, work.work);
	ib_dereg_mr(ibmr->u.mr);
	kfree(ibmr);
}