swiotlb-xen.c
/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice-versa, and
 * also for providing a mechanism to have contiguous pages for device
 * driver operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */
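
/*
 * To illustrate with hypothetical frame numbers: a guest may own PFNs
 * 0x100 and 0x101 that Xen backs with MFNs 0x8ac and 0x321 - adjacent
 * pseudo-physical pages, but non-adjacent (and possibly high) machine
 * pages.  Devices see machine addresses only, hence the translation
 * helpers and the bounce buffering below.
 */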

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

#ifndef CONFIG_X86
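/*
 * Non-x86 (e.g. ARM) helper: derive the coherent DMA mask to allocate
 * against, falling back to 24 bits for GFP_DMA allocations and 32 bits
 * otherwise when the device does not provide one.
 */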
static unsigned long dma_alloc_coherent_mask(struct device *dev,
					    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

	return dma_mask;
}
#endif

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

/*
 * Both of these functions should avoid PFN_PHYS because phys_addr_t
 * can be 32bit when dma_addr_t is 64bit leading to a loss in
 * information if the shift is done before casting to 64bit.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT;

	dma |= paddr & ~PAGE_MASK;

	return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~PAGE_MASK;

	return paddr;
}
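
/*
 * Worked example with hypothetical numbers (PAGE_SHIFT == 12): a guest
 * physical address of 0x12345678 lies in PFN 0x12345 at offset 0x678.
 * If Xen backs that PFN with MFN 0xabcde, xen_phys_to_bus() returns
 * (0xabcde << 12) | 0x678 == 0xabcde678, the machine (bus) address the
 * device has to be programmed with; xen_bus_to_phys() is the inverse.
 */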

static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}

static int check_pages_physically_contiguous(unsigned long pfn,
					     unsigned int offset,
					     size_t length)
{
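	/*
	 * Check whether the MFNs backing the pages that cover
	 * [offset, offset + length) are consecutive, i.e. whether the
	 * buffer is machine-contiguous from the device's point of view.
	 */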
	unsigned long next_mfn;
	int i;
	int nr_pages;

	next_mfn = pfn_to_mfn(pfn);
	nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_mfn(++pfn) != ++next_mfn)
			return 0;
	}
	return 1;
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
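	/*
	 * Buffers that fit within one page, or whose pages happen to be
	 * machine-contiguous, do not need to be bounced.
	 */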
	unsigned long pfn = PFN_DOWN(p);
	unsigned int offset = p & ~PAGE_MASK;

	if (offset + size <= PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(pfn, offset, size))
		return 0;
	return 1;
}

static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
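	/* Does this bus address fall inside our bounce buffer? */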
	unsigned long mfn = PFN_DOWN(dma_addr);
	unsigned long pfn = mfn_to_local_pfn(mfn);
	phys_addr_t paddr;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(pfn)) {
		paddr = PFN_PHYS(pfn);
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

static int max_dma_bits = 32;

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
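	/*
	 * Exchange each IO_TLB_SEGSIZE-slab segment of the buffer for
	 * machine pages that are contiguous and addressable within
	 * dma_bits, widening the address restriction (up to max_dma_bits)
	 * whenever the hypervisor cannot satisfy it.
	 */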
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
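
/*
 * Pick the IO TLB size: honour a caller-specified slab count if there is
 * one, otherwise default to 64MB worth of slabs, and return the resulting
 * buffer size in bytes.
 */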
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"\
		    "You either: don't have the permissions, do not have"\
		    " enough free memory under 4GB, or the hypervisor memory"\
		    " is too fragmented!";
	default:
		break;
	}
	return "";
}
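
/*
 * Allocate the swiotlb buffer (from bootmem when called early, from the
 * page allocator otherwise), have Xen swap it for machine-contiguous,
 * DMA-addressable memory, and register it with the core swiotlb code.
 * On failure the requested size is halved (never below 2MB) and the
 * whole sequence is retried a few times before giving up.
 */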
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
	/*
	 * Get IO TLB memory from any location.
	 */
	if (early)
		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
	else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
			 verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   struct dma_attrs *attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	* Ignore region specifiers - the kernel's idea of
	* pseudo-phys memory layout has nothing to do with the
	* machine physical layout.  We can't allocate highmem
	* because we can't return a pointer to it.
	*/
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
		return ret;

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = dma_alloc_coherent_mask(hwdev, flags);

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(phys);
	if (((dev_addr + size - 1 <= dma_mask)) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
	}
	memset(ret, 0, size);
	return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, struct dma_attrs *attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (dma_release_from_coherent(hwdev, order, vaddr))
		return;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return the
	 * physical address */
	phys = xen_bus_to_phys(dev_addr);

	if (((dev_addr + size - 1 > dma_mask)) ||
	    range_straddles_page_boundary(phys, size))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);


/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr)) &&
	    !swiotlb_force) {
		/* we are not interested in the dma_addr returned by
		 * xen_dma_map_page, only in the potential cache flushes executed
		 * by the function. */
		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
		return dev_addr;
	}

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
	if (map == SWIOTLB_MAP_ERROR)
		return DMA_ERROR_CODE;

	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
					dev_addr, map & ~PAGE_MASK, size, dir, attrs);
	dev_addr = xen_phys_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		dev_addr = 0;
	}
	return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (target == SYNC_FOR_CPU)
		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

	if (target == SYNC_FOR_DEVICE)
		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force ||
		    xen_arch_need_swiotlb(hwdev, PFN_DOWN(paddr), PFN_DOWN(dev_addr)) ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
								 start_dma_addr,
								 sg_phys(sg),
								 sg->length,
								 dir);
			if (map == SWIOTLB_MAP_ERROR) {
				dev_warn(hwdev, "swiotlb buffer is full\n");
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
						dev_addr,
						map & ~PAGE_MASK,
						sg->length,
						dir,
						attrs);
			sg->dma_address = xen_phys_to_bus(map);
		} else {
			/* we are not interested in the dma_addr returned by
			 * xen_dma_map_page, only in the potential cache flushes executed
			 * by the function. */
			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
						dev_addr,
						paddr & ~PAGE_MASK,
						sg->length,
						dir,
						attrs);
			sg->dma_address = dev_addr;
		}
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);

}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg_dma_len(sg), dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);

int
xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
{
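	/* Refuse masks that the swiotlb bounce buffer itself cannot reach. */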
	if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);