/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds.h"
#include "ib.h"

static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
	struct rds_ib_device	*device;
	struct rds_ib_mr_pool	*pool;
	struct ib_fmr		*fmr;

	struct llist_node	llnode;

	/* unmap_list is for freeing */
	struct list_head	unmap_list;
	unsigned int		remap_count;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	u64			*dma;
	int			sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
	struct mutex		flush_lock;		/* serialize fmr invalidate */
	struct delayed_work	flush_worker;		/* flush worker */

	atomic_t		item_count;		/* total # of MRs */
	atomic_t		dirty_count;		/* # of dirty MRs */

	struct llist_head	drop_list;		/* MRs that have reached their max_maps limit */
	struct llist_head	free_list;		/* unused MRs */
	struct llist_head	clean_list;		/* global unused & unmapped MRs */
	wait_queue_head_t	flush_wait;

	atomic_t		free_pinned;		/* memory pinned by free MRs */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	struct ib_fmr_attr	fmr_attr;
};

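/* Workqueue used to run the delayed MR pool flush work
 * (rds_ib_mr_pool_flush_worker()).
 */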
struct workqueue_struct *rds_ib_fmr_wq;

int rds_ib_fmr_init(void)
{
	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
	if (!rds_ib_fmr_wq)
		return -ENOMEM;
	return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed.  As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_fmr_exit(void)
{
	destroy_workqueue(rds_ib_fmr_wq);
}

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

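/* Find the rds_ib_device bound to @ipaddr.  Walks the global device list
 * under RCU and returns the matching device with its refcount raised, or
 * NULL if no device owns that address.  Callers drop the reference with
 * rds_ib_dev_put().
 */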
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				atomic_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free)
		kfree_rcu(to_free, rcu);
}

int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (!rds_ibdev_old)
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);

	if (rds_ibdev_old != rds_ibdev) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
		rds_ib_dev_put(rds_ibdev_old);
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
	}
	rds_ib_dev_put(rds_ibdev_old);

	return 0;
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	atomic_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	pool->fmr_attr.max_pages = fmr_message_size;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

	/* We never allow more than max_items MRs to be allocated.
	 * Once we exceed max_items_soft, we start freeing
	 * items more aggressively.
	 * Make sure that max_items > max_items_soft > max_items / 2
	 */
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
	pool->max_items = rds_ibdev->max_fmrs;

	return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

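/* Try to grab an unused MR off the clean list.  The per-CPU
 * CLEAN_LIST_BUSY_BIT marks this CPU as being inside llist_del_first() so
 * that the flush path can wait for us (wait_clean_list_grace()) before it
 * puts entries back onto clean_list.
 */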
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long *flag;

	preempt_disable();
	flag = this_cpu_ptr(&clean_list_grace);
	set_bit(CLEAN_LIST_BUSY_BIT, flag);
	ret = llist_del_first(&pool->clean_list);
	if (ret)
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);

	clear_bit(CLEAN_LIST_BUSY_BIT, flag);
	preempt_enable();
	return ibmr;
}

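/* Wait until no CPU is inside the clean_list "grace" section above, i.e.
 * nobody is partway through llist_del_first() on a clean list.
 */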
static inline void wait_clean_list_grace(void)
{
	int cpu;
	unsigned long *flag;

	for_each_online_cpu(cpu) {
		flag = &per_cpu(clean_list_grace, cpu);
		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
			cpu_relax();
	}
}

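/* Allocate an FMR-backed MR, preferring to reuse one from the clean list.
 * May kick or wait on a pool flush when the pool is at its limits.
 */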
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flushing any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If the driver
		 * tells us we can't use more than N fmrs, we shouldn't start
		 * arguing with it */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE|
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

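/* DMA-map the scatterlist and hand its pages to ib_map_phys_fmr().  All
 * entries must be page aligned, except for the start of the first entry
 * and the end of the last one.
 */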
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
	       struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				 DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > fmr_message_size)
		return -EINVAL;

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
				   dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	rds_ib_stats_inc(s_ib_rdma_mr_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

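/* Make the pages behind this MR coherent with the CPU (DMA_FROM_DEVICE)
 * or with the device (DMA_TO_DEVICE).
 */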
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

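/* Drop the DMA mapping and unpin (dirty and release) the pages backing
 * this MR.  Does not adjust the pool's free_pinned accounting; see
 * rds_ib_teardown_mr() for that.
 */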
static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

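/* Tear down the mapping and credit the unpinned pages back against the
 * pool's free_pinned count.
 */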
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}

/*
 * this takes a list head of mrs and turns it into linked llist nodes
 * of clusters.  Each cluster has linked llist nodes of
 * MR_CLUSTER_SIZE mrs that are ready for reuse.
 */
static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
				struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
			        int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr, *next;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;
	int ret = 0;

	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while(!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_fmr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_fmr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all)
		llist_append_to_list(&pool->clean_list, &unmap_list);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, unmap_list)
		list_add(&ibmr->fmr->list, &fmr_list);

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
			rds_ib_stats_inc(s_ib_rdma_mr_free);
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
	}

	if (!list_empty(&unmap_list)) {
		/* we have to make sure that none of the things we're about
		 * to put on the clean list would race with other cpus trying
		 * to pull items off.  The llist would explode if we managed to
		 * remove something from the clean list and then add it back again
		 * while another CPU was spinning on that same item in llist_del_first.
		 *
		 * This is pretty unlikely, but just in case wait for an llist grace
		 * period here before adding anything back into the clean list.
		 */
		wait_clean_list_grace();

		list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret)
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);

		/* more than one entry in llist nodes */
		if (clean_nodes->next)
			llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(dirty_to_clean, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

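/* Called when a local MR is released.  The MR goes back onto the pool's
 * free list (or the drop list once it has hit fmr_attr.max_maps remaps),
 * and a flush is queued or run if the pool has too much pinned memory or
 * too many dirty MRs, or if the caller asked for invalidation.
 */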
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time.
			 */
			queue_delayed_work(rds_ib_fmr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		if (pool)
			rds_ib_flush_mr_pool(pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

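/* Register a caller-supplied scatterlist as an MR on the device the socket
 * is bound to.  On success the FMR's rkey is returned through @key_ret and
 * the MR is handed back as the transport-private pointer; on failure an
 * ERR_PTR is returned instead.
 */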
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr)) {
		rds_ib_dev_put(rds_ibdev);
		return ibmr;
	}

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	ibmr->device = rds_ibdev;
	rds_ibdev = NULL;

 out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);
	return ibmr;
}