/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(unsigned long data);

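/**
 * init_iova_domain - initialize an iova domain
 * @iovad: - iova domain in question
 * @granule: - pfn granularity, normally the smallest supported IOMMU page size
 * @start_pfn: - lowest pfn this domain may hand out
 * @pfn_32bit: - highest pfn reachable with a 32-bit DMA mask
 */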
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = pfn_32bit + 1;
	iovad->flush_cb = NULL;
	iovad->fq = NULL;
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);

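/*
 * Tear down the deferred-flush queues: stop the flush timer, run the entry
 * destructor on anything still pending and free the per-cpu queues.
 */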
static void free_iova_flush_queue(struct iova_domain *iovad)
{
	if (!iovad->fq)
		return;

	if (timer_pending(&iovad->fq_timer))
		del_timer(&iovad->fq_timer);

	fq_destroy_all_entries(iovad);

	free_percpu(iovad->fq);

	iovad->fq         = NULL;
	iovad->flush_cb   = NULL;
	iovad->entry_dtor = NULL;
}

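/*
 * Set up one flush queue per possible CPU so that IOTLB invalidations can be
 * batched: entries queued via queue_iova() are only handed back to the
 * allocator once @flush_cb has flushed the IOTLB for the whole domain.
 */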
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
	int cpu;

	atomic64_set(&iovad->fq_flush_start_cnt,  0);
	atomic64_set(&iovad->fq_flush_finish_cnt, 0);

	iovad->fq = alloc_percpu(struct iova_fq);
	if (!iovad->fq)
		return -ENOMEM;

	iovad->flush_cb   = flush_cb;
	iovad->entry_dtor = entry_dtor;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq;

		fq = per_cpu_ptr(iovad->fq, cpu);
		fq->head = 0;
		fq->tail = 0;

		spin_lock_init(&fq->lock);
	}

	setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
	atomic_set(&iovad->fq_timer_on, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(init_iova_flush_queue);

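/*
 * Choose where a top-down rbtree walk should start: just below the cached
 * 32-bit node when the caller's limit permits it, otherwise from the
 * right-most node in the tree.
 */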
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn > iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			rb_entry(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo;
		return prev_node;
	}
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

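/*
 * When an iova at or above the cached 32-bit hint is freed, advance the hint
 * to the node after the freed one (if that is still below the 32-bit
 * boundary) or drop it, so later allocations do not start from a stale node.
 */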
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = rb_entry(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = rb_entry(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}

/* Insert the iova into domain rbtree by holding writer lock */
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova,
		   struct rb_node *start)
{
	struct rb_node **new, *parent = NULL;

	new = (start) ? &start : &(root->rb_node);
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = rb_entry(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else {
			WARN_ON(1); /* this should not happen */
			return;
		}
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

/*
 * Computes the padding size required to make the start address
 * naturally aligned on the power-of-two order of its size
 */
static unsigned int
iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
{
	return (limit_pfn - size) & (__roundup_pow_of_two(size) - 1);
}

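/*
 * Walk the rbtree top-down from @limit_pfn towards iovad->start_pfn, take
 * the first hole large enough for @size pfns (plus any alignment padding)
 * and insert the new iova there under the rbtree lock.
 */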
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
			struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = rb_entry(curr, struct iova, node);

		if (limit_pfn <= curr_iova->pfn_lo) {
			goto move_left;
		} else if (limit_pfn > curr_iova->pfn_hi) {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) < limit_pfn)
				break;	/* found a free slot */
		}
		limit_pfn = curr_iova->pfn_lo;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size);
	new->pfn_hi = new->pfn_lo + size - 1;

	/* If we have 'prev', it's a valid place to start the insertion. */
	iova_insert_rbtree(&iovad->rbroot, new, prev);
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);


	return 0;
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			printk(KERN_ERR "Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);

static struct iova *
private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct rb_node *node = iovad->rbroot.rb_node;

	assert_spin_locked(&iovad->iova_rbtree_lock);

	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	return NULL;
}

static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
{
	assert_spin_locked(&iovad->iova_rbtree_lock);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	free_iova_mem(iova);
}

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct iova *iova;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	iova = private_find_iova(iovad, pfn);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
399
	private_free_iova(iovad, iova);
400 401
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);

}
EXPORT_SYMBOL_GPL(free_iova);

/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn)
{
	bool flushed_rcache = false;
	unsigned long iova_pfn;
	struct iova *new_iova;

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (flushed_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flushed_rcache = true;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);

/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This function frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);

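/*
 * Each CPU owns a small ring buffer (IOVA_FQ_SIZE entries) of IOVAs whose
 * IOTLB invalidation has been deferred.  An entry may only be freed once a
 * domain flush that started after it was queued has completed.
 */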
#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned fq_ring_add(struct iova_fq *fq)
{
	unsigned idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;

	return idx;
}

static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
	unsigned idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {

		if (fq->entries[idx].counter >= counter)
			break;

		if (iovad->entry_dtor)
			iovad->entry_dtor(fq->entries[idx].data);

		free_iova_fast(iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

static void iova_domain_flush(struct iova_domain *iovad)
{
	atomic64_inc(&iovad->fq_flush_start_cnt);
	iovad->flush_cb(iovad);
	atomic64_inc(&iovad->fq_flush_finish_cnt);
}

static void fq_destroy_all_entries(struct iova_domain *iovad)
{
	int cpu;

	/*
	 * This code runs when the iova_domain is being destroyed, so don't
	 * bother to free iovas, just call the entry_dtor on all remaining
	 * entries.
	 */
	if (!iovad->entry_dtor)
		return;

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
		int idx;

		fq_ring_for_each(idx, fq)
			iovad->entry_dtor(fq->entries[idx].data);
	}
}

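/* Timer callback: flush the IOTLB once and drain every CPU's flush queue. */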
static void fq_flush_timeout(unsigned long data)
{
	struct iova_domain *iovad = (struct iova_domain *)data;
	int cpu;

	atomic_set(&iovad->fq_timer_on, 0);
	iova_domain_flush(iovad);

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq;

		fq = per_cpu_ptr(iovad->fq, cpu);
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(iovad, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}

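/**
 * queue_iova - defer freeing of an iova range until after an IOTLB flush
 * @iovad: - iova domain in question
 * @pfn: - first pfn of the range
 * @pages: - number of pages in the range
 * @data: - opaque value handed to the entry destructor
 * The range is put on this CPU's flush queue and freed later, either when
 * the queue fills up and forces a flush or when the flush timer fires.
 */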
void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data)
{
	struct iova_fq *fq = get_cpu_ptr(iovad->fq);
	unsigned long flags;
	unsigned idx;

	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below less
	 * likely to be true.
	 */
	fq_ring_free(iovad, fq);

	if (fq_full(fq)) {
		iova_domain_flush(iovad);
		fq_ring_free(iovad, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages    = pages;
	fq->entries[idx].data     = data;
	fq->entries[idx].counter  = atomic64_read(&iovad->fq_flush_start_cnt);

	spin_unlock_irqrestore(&fq->lock, flags);

	if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
		mod_timer(&iovad->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));

	put_cpu_ptr(iovad->fq);
}
EXPORT_SYMBOL_GPL(queue_iova);

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	free_iova_flush_queue(iovad);
	free_iova_rcaches(iovad);
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = rb_entry(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = rb_entry(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova, NULL);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}

/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = rb_entry(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
				break;
	}

	/* We are here either because this is the first reserved node
	 * or we need to insert the remaining non-overlapping address range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);

/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iovas from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = rb_entry(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
				iova->pfn_lo, iova->pfn_lo);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);

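/*
 * Shrink @iova to [pfn_lo, pfn_hi], remove it from the rbtree and re-insert
 * any leftover range below pfn_lo or above pfn_hi as separate iovas.  The
 * returned iova is no longer tracked by the domain.
 */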
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
		      unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev, NULL);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next, NULL);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}

/*
 * Magazine caches for IOVA ranges.  For an introduction to magazines,
 * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
 * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
 * For simplicity, we use a static magazine size and don't implement the
 * dynamic size tuning described in the paper.
 */

#define IOVA_MAG_SIZE 128

struct iova_magazine {
	unsigned long size;
	unsigned long pfns[IOVA_MAG_SIZE];
};

struct iova_cpu_rcache {
	spinlock_t lock;
	struct iova_magazine *loaded;
	struct iova_magazine *prev;
};

static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
	return kzalloc(sizeof(struct iova_magazine), flags);
}

static void iova_magazine_free(struct iova_magazine *mag)
{
	kfree(mag);
}

static void
iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
{
	unsigned long flags;
	int i;

	if (!mag)
		return;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);

	for (i = 0 ; i < mag->size; ++i) {
		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);

		BUG_ON(!iova);
		private_free_iova(iovad, iova);
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	mag->size = 0;
}

static bool iova_magazine_full(struct iova_magazine *mag)
{
	return (mag && mag->size == IOVA_MAG_SIZE);
}

static bool iova_magazine_empty(struct iova_magazine *mag)
{
	return (!mag || mag->size == 0);
}

static unsigned long iova_magazine_pop(struct iova_magazine *mag,
				       unsigned long limit_pfn)
{
	BUG_ON(iova_magazine_empty(mag));

	if (mag->pfns[mag->size - 1] >= limit_pfn)
		return 0;

	return mag->pfns[--mag->size];
}

static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
{
	BUG_ON(iova_magazine_full(mag));

	mag->pfns[mag->size++] = pfn;
}

static void init_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned int cpu;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->depot_size = 0;
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
		for_each_possible_cpu(cpu) {
			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
			spin_lock_init(&cpu_rcache->lock);
			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
		}
	}
}

/*
 * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
 * return true on success.  Can fail if rcache is full and we can't free
 * space, and free_iova() (our only caller) will then return the IOVA
 * range to the rbtree instead.
 */
static bool __iova_rcache_insert(struct iova_domain *iovad,
				 struct iova_rcache *rcache,
				 unsigned long iova_pfn)
{
	struct iova_magazine *mag_to_free = NULL;
	struct iova_cpu_rcache *cpu_rcache;
	bool can_insert = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_full(cpu_rcache->loaded)) {
		can_insert = true;
	} else if (!iova_magazine_full(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		can_insert = true;
	} else {
		struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);

		if (new_mag) {
			spin_lock(&rcache->lock);
			if (rcache->depot_size < MAX_GLOBAL_MAGS) {
				rcache->depot[rcache->depot_size++] =
						cpu_rcache->loaded;
			} else {
				mag_to_free = cpu_rcache->loaded;
			}
			spin_unlock(&rcache->lock);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
		}
	}

	if (can_insert)
		iova_magazine_push(cpu_rcache->loaded, iova_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	if (mag_to_free) {
		iova_magazine_free_pfns(mag_to_free, iovad);
		iova_magazine_free(mag_to_free);
	}

	return can_insert;
}

static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
			       unsigned long size)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return false;

	return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
}

/*
 * Caller wants to allocate a new IOVA range from 'rcache'.  If we can
 * satisfy the request, return a matching non-NULL range and remove
 * it from the 'rcache'.
 */
static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
				       unsigned long limit_pfn)
{
	struct iova_cpu_rcache *cpu_rcache;
	unsigned long iova_pfn = 0;
	bool has_pfn = false;
	unsigned long flags;

	cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches);
	spin_lock_irqsave(&cpu_rcache->lock, flags);

	if (!iova_magazine_empty(cpu_rcache->loaded)) {
		has_pfn = true;
	} else if (!iova_magazine_empty(cpu_rcache->prev)) {
		swap(cpu_rcache->prev, cpu_rcache->loaded);
		has_pfn = true;
	} else {
		spin_lock(&rcache->lock);
		if (rcache->depot_size > 0) {
			iova_magazine_free(cpu_rcache->loaded);
			cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
			has_pfn = true;
		}
		spin_unlock(&rcache->lock);
	}

	if (has_pfn)
		iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);

	return iova_pfn;
}

/*
 * Try to satisfy IOVA allocation range from rcache.  Fail if requested
 * size is too big or the DMA limit we are given isn't satisfied by the
 * top element in the magazine.
 */
static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn)
{
	unsigned int log_size = order_base_2(size);

	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
		return 0;

	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
}

/*
 * Free a cpu's rcache.
 */
static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
				 struct iova_rcache *rcache)
{
	struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
	unsigned long flags;

	spin_lock_irqsave(&cpu_rcache->lock, flags);

	iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
	iova_magazine_free(cpu_rcache->loaded);

	iova_magazine_free_pfns(cpu_rcache->prev, iovad);
	iova_magazine_free(cpu_rcache->prev);

	spin_unlock_irqrestore(&cpu_rcache->lock, flags);
}

/*
 * free rcache data structures.
 */
static void free_iova_rcaches(struct iova_domain *iovad)
{
	struct iova_rcache *rcache;
	unsigned long flags;
	unsigned int cpu;
	int i, j;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		for_each_possible_cpu(cpu)
			free_cpu_iova_rcache(cpu, iovad, rcache);
		spin_lock_irqsave(&rcache->lock, flags);
		free_percpu(rcache->cpu_rcaches);
		for (j = 0; j < rcache->depot_size; ++j) {
			iova_magazine_free_pfns(rcache->depot[j], iovad);
			iova_magazine_free(rcache->depot[j]);
		}
		spin_unlock_irqrestore(&rcache->lock, flags);
	}
}

/*
 * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
 */
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
{
	struct iova_cpu_rcache *cpu_rcache;
	struct iova_rcache *rcache;
	unsigned long flags;
	int i;

	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
		spin_lock_irqsave(&cpu_rcache->lock, flags);
		iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
		iova_magazine_free_pfns(cpu_rcache->prev, iovad);
		spin_unlock_irqrestore(&cpu_rcache->lock, flags);
	}
}

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");