/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

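/* The IOMMU control registers live behind the physical-address bypass
 * ASI, so a plain pointer dereference cannot reach them; these helpers
 * wrap the required ldxa/stxa access forms.
 */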
#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
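/* Either a single store to the flush-invalidate register empties the
 * IOMMU TLB, or, on IOMMUs without one, each of the sixteen tag
 * registers is cleared by hand and the write-sync register is read
 * back to force the stores to complete.
 */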
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

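/* IOPTE encodings for the two mapping flavors: consistent mappings are
 * valid and cacheable, while streaming mappings additionally set
 * IOPTE_STBUF so the streaming buffer may cache them.  The DMA context
 * number is folded into the IOPTE starting at bit 47.
 */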
#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 * facility it must all be done in one pass while under the iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever advance
 * the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning.  */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

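/* Give back a range handed out by iommu_range_alloc().  Callers hold
 * the IOMMU lock and are responsible for repointing the freed IOPTEs
 * at the dummy page.
 */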
void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

	iommu_area_free(arena->map, entry, npages);
}

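/* Set up one IOMMU instance: the allocation arena, the dummy page that
 * inactive IOPTEs point to, and the IOMMU page table itself, all
 * allocated on the requested NUMA node.
 */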
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map.  */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself.  */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}

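/* Thin wrapper that turns a successful range allocation into a
 * pointer to the first IOPTE of the run.
 */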
static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

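/* Hand out a DMA context number.  Zero means "no context": the bitmap
 * search starts at the lowest possibly-free slot, wraps around once,
 * and falls back to zero if every context is busy.
 */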
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

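/* Allocate consistent DMA memory: get zeroed pages, then point a run
 * of IOPTEs at them with the consistent (non-streaming) protection
 * bits and hand back the corresponding IOMMU bus address.
 */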
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
				    enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;
L
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}

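/* Write back and invalidate any streaming buffer data covering the
 * given DMA range.  When both the IOMMU and the STC support context
 * flushing, the whole context is flushed by tag match; otherwise the
 * range is flushed one IO page at a time.  Unless the transfer was
 * DMA_TO_DEVICE (so the STC cannot hold dirty data), completion is
 * then synchronized through the flush-flag word in memory.
 */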
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

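/* Tear down a single mapping: flush the streaming buffer if it is
 * enabled, repoint the IOPTEs at the dummy page, then release the
 * range and the DMA context.
 */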
static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
				size_t sz, enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

}

static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

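/* Unmap a scatterlist, stopping at the first entry with a zero
 * dma_length (the terminator that dma_4u_map_sg() wrote).
 */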
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

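/* Make device writes visible to the CPU by flushing the streaming
 * buffer for the mapped range; nothing to do when the STC is off.
 */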
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

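/* Scatterlist variant of the above: flush everything from the first
 * entry through the last one with a non-zero dma_length.
 */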
static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

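/* The sun4u DMA operations.  On sun4v, the hypervisor-backed
 * implementation (see pci_sun4v) replaces these by overwriting
 * dma_ops at initialization time.
 */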
static const struct dma_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_single		= dma_4u_map_single,
	.unmap_single		= dma_4u_unmap_single,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

const struct dma_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

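/* A sun4u IOMMU translates 32-bit bus addresses only, so any DMA mask
 * wider than 32 bits is refused before checking it against the
 * per-IOMMU address mask and, for PCI devices, the PCI layer.
 */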
int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
	return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);