Commit e1d8f62a authored by Konrad Rzeszutek Wilk

Merge remote-tracking branch 'stefano/swiotlb-xen-9.1' into stable/for-linus-3.13

* stefano/swiotlb-xen-9.1:
  swiotlb-xen: fix error code returned by xen_swiotlb_map_sg_attrs
  swiotlb-xen: static inline xen_phys_to_bus, xen_bus_to_phys, xen_virt_to_bus and range_straddles_page_boundary
  grant-table: call set_phys_to_machine after mapping grant refs
  arm,arm64: do not always merge biovec if we are running on Xen
  swiotlb: print a warning when the swiotlb is full
  swiotlb-xen: use xen_dma_map/unmap_page, xen_dma_sync_single_for_cpu/device
  xen: introduce xen_dma_map/unmap_page and xen_dma_sync_single_for_cpu/device
  swiotlb-xen: use xen_alloc/free_coherent_pages
  xen: introduce xen_alloc/free_coherent_pages
  arm64/xen: get_dma_ops: return xen_dma_ops if we are running as xen_initial_domain
  arm/xen: get_dma_ops: return xen_dma_ops if we are running as xen_initial_domain
  swiotlb-xen: introduce xen_swiotlb_set_dma_mask
  xen/arm,arm64: enable SWIOTLB_XEN
  xen: make xen_create_contiguous_region return the dma address
  xen/x86: allow __set_phys_to_machine for autotranslate guests
  arm/xen,arm64/xen: introduce p2m
  arm64: define DMA_ERROR_CODE
  arm: make SWIOTLB available
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Conflicts:
	arch/arm/include/asm/dma-mapping.h
	drivers/xen/swiotlb-xen.c

[Conflicts arose b/c "arm: make SWIOTLB available" v8 was in Stefano's
branch, while I had v9 + Ack from Russell. I also fixed up white-space
issues]
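Taken together, the branch routes every DMA operation of an ARM/ARM64 initial domain through swiotlb-xen. A minimal sketch of the resulting call path from a dom0 driver (illustrative only: dev and page stand in for a real device and buffer page, and error handling is trimmed):

/* dma_map_page() resolves the ops via get_dma_ops(dev); in dom0 that is
 * now xen_dma_ops, i.e. xen_swiotlb_dma_ops from arch/arm/xen/mm.c. */
dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev, handle))
	return -ENOMEM;	/* bounce buffers exhausted or address unusable */
/* ... device performs DMA ... */
dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);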
arch/arm/Kconfig
@@ -1888,6 +1888,7 @@ config XEN
depends on CPU_V7 && !CPU_V6
depends on !GENERIC_ATOMIC64
select ARM_PSCI
select SWIOTLB_XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
arch/arm/include/asm/dma-mapping.h
@@ -11,17 +11,28 @@
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#define DMA_ERROR_CODE (~0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
return &arm_dma_ops;
}
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (xen_initial_domain())
return xen_dma_ops;
else
return __generic_dma_ops(dev);
}
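/* In the initial domain every mapping now goes through xen_dma_ops, which
 * arch/arm/xen/mm.c points at xen_swiotlb_dma_ops; domUs and native
 * kernels keep the per-device or default arm_dma_ops. */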
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
BUG_ON(!dev);
arch/arm/include/asm/io.h
@@ -24,9 +24,11 @@
#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
#include <xen/xen.h>
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -372,6 +374,12 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
#define BIOVEC_MERGEABLE(vec1, vec2) \
((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
const struct bio_vec *vec2);
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
(!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
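/* Under Xen, pages that are contiguous in the guest's pseudo-physical
 * address space may be backed by discontiguous machine frames, so two
 * biovecs may only merge if xen_biovec_phys_mergeable() confirms that the
 * underlying machine frames are contiguous as well. */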
#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
arch/arm/include/asm/xen/hypervisor.h
@@ -16,4 +16,6 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return PARAVIRT_LAZY_NONE;
}
extern struct dma_map_ops *xen_dma_ops;
#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
arch/arm/include/asm/xen/page-coherent.h (new file)
#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H
#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
}
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
if (__generic_dma_ops(hwdev)->unmap_page)
__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
}
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
}
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
if (__generic_dma_ops(hwdev)->sync_single_for_device)
__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
}
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
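These helpers exist so that the common swiotlb-xen code never open-codes __get_free_pages() or direct cache flushes: on ARM the native dma_map_ops may hand back a remapped virtual address and must perform cache maintenance for non-coherent devices, so allocation, free, map and sync are all delegated to __generic_dma_ops(). The NULL checks reflect that several of these hooks are optional in a dma_map_ops.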
arch/arm/include/asm/xen/page.h
@@ -6,12 +6,12 @@
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#define pfn_to_mfn(pfn) (pfn)
#define phys_to_machine_mapping_valid(pfn) (1)
#define mfn_to_pfn(mfn) (mfn)
#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
#define pte_mfn pte_pfn
@@ -32,6 +32,44 @@ typedef struct xpaddr {
#define INVALID_P2M_ENTRY (~0UL)
unsigned long __pfn_to_mfn(unsigned long pfn);
unsigned long __mfn_to_pfn(unsigned long mfn);
extern struct rb_root phys_to_mach;
static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
unsigned long mfn;
if (phys_to_mach.rb_node != NULL) {
mfn = __pfn_to_mfn(pfn);
if (mfn != INVALID_P2M_ENTRY)
return mfn;
}
if (xen_initial_domain())
return pfn;
else
return INVALID_P2M_ENTRY;
}
static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
unsigned long pfn;
if (phys_to_mach.rb_node != NULL) {
pfn = __mfn_to_pfn(mfn);
if (pfn != INVALID_P2M_ENTRY)
return pfn;
}
if (xen_initial_domain())
return mfn;
else
return INVALID_P2M_ENTRY;
}
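/* Lookups try the rbtree first (it holds e.g. grant-mapped foreign pages
 * recorded via __set_phys_to_machine_multi()); dom0, which is
 * autotranslated and mapped 1:1 on ARM, falls back to the identity
 * mapping, while other domains get INVALID_P2M_ENTRY. */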
#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
static inline xmaddr_t phys_to_machine(xpaddr_t phys)
{
unsigned offset = phys.paddr & ~PAGE_MASK;
@@ -76,11 +114,9 @@ static inline int m2p_remove_override(struct page *page, bool clear_pte)
return 0;
}
static inline bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
return true;
}
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
unsigned long nr_pages);
static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
arch/arm/xen/Makefile
-obj-y := enlighten.o hypercall.o grant-table.o
+obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
arch/arm/xen/mm.c (new file)
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>
#include <asm/cacheflush.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle)
{
if (!xen_initial_domain())
return -EINVAL;
/* we assume that dom0 is mapped 1:1 for now */
*dma_handle = pstart;
return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL_GPL(xen_dma_ops);
static struct dma_map_ops xen_swiotlb_dma_ops = {
.mapping_error = xen_swiotlb_dma_mapping_error,
.alloc = xen_swiotlb_alloc_coherent,
.free = xen_swiotlb_free_coherent,
.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
.sync_single_for_device = xen_swiotlb_sync_single_for_device,
.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
.map_sg = xen_swiotlb_map_sg_attrs,
.unmap_sg = xen_swiotlb_unmap_sg_attrs,
.map_page = xen_swiotlb_map_page,
.unmap_page = xen_swiotlb_unmap_page,
.dma_supported = xen_swiotlb_dma_supported,
.set_dma_mask = xen_swiotlb_set_dma_mask,
};
int __init xen_mm_init(void)
{
if (!xen_initial_domain())
return 0;
xen_swiotlb_init(1, false);
xen_dma_ops = &xen_swiotlb_dma_ops;
return 0;
}
arch_initcall(xen_mm_init);
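xen_mm_init() runs for dom0 only: it sets up the swiotlb-xen bounce buffer via xen_swiotlb_init() (the false argument selects the non-early, post-bootmem setup path) and publishes xen_swiotlb_dma_ops through the xen_dma_ops pointer that get_dma_ops() consults.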
arch/arm/xen/p2m.c (new file)
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/rwlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>
#include <asm/cacheflush.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
struct xen_p2m_entry {
unsigned long pfn;
unsigned long mfn;
unsigned long nr_pages;
struct rb_node rbnode_mach;
struct rb_node rbnode_phys;
};
rwlock_t p2m_lock;
struct rb_root phys_to_mach = RB_ROOT;
static struct rb_root mach_to_phys = RB_ROOT;
static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
{
struct rb_node **link = &phys_to_mach.rb_node;
struct rb_node *parent = NULL;
struct xen_p2m_entry *entry;
int rc = 0;
while (*link) {
parent = *link;
entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);
if (new->mfn == entry->mfn)
goto err_out;
if (new->pfn == entry->pfn)
goto err_out;
if (new->pfn < entry->pfn)
link = &(*link)->rb_left;
else
link = &(*link)->rb_right;
}
rb_link_node(&new->rbnode_phys, parent, link);
rb_insert_color(&new->rbnode_phys, &phys_to_mach);
goto out;
err_out:
rc = -EINVAL;
pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
out:
return rc;
}
unsigned long __pfn_to_mfn(unsigned long pfn)
{
struct rb_node *n = phys_to_mach.rb_node;
struct xen_p2m_entry *entry;
unsigned long irqflags;
read_lock_irqsave(&p2m_lock, irqflags);
while (n) {
entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
if (entry->pfn <= pfn &&
entry->pfn + entry->nr_pages > pfn) {
read_unlock_irqrestore(&p2m_lock, irqflags);
return entry->mfn + (pfn - entry->pfn);
}
if (pfn < entry->pfn)
n = n->rb_left;
else
n = n->rb_right;
}
read_unlock_irqrestore(&p2m_lock, irqflags);
return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__pfn_to_mfn);
static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
{
struct rb_node **link = &mach_to_phys.rb_node;
struct rb_node *parent = NULL;
struct xen_p2m_entry *entry;
int rc = 0;
while (*link) {
parent = *link;
entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);
if (new->mfn == entry->mfn)
goto err_out;
if (new->pfn == entry->pfn)
goto err_out;
if (new->mfn < entry->mfn)
link = &(*link)->rb_left;
else
link = &(*link)->rb_right;
}
rb_link_node(&new->rbnode_mach, parent, link);
rb_insert_color(&new->rbnode_mach, &mach_to_phys);
goto out;
err_out:
rc = -EINVAL;
pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n",
__func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);
out:
return rc;
}
unsigned long __mfn_to_pfn(unsigned long mfn)
{
struct rb_node *n = mach_to_phys.rb_node;
struct xen_p2m_entry *entry;
unsigned long irqflags;
read_lock_irqsave(&p2m_lock, irqflags);
while (n) {
entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
if (entry->mfn <= mfn &&
entry->mfn + entry->nr_pages > mfn) {
read_unlock_irqrestore(&p2m_lock, irqflags);
return entry->pfn + (mfn - entry->mfn);
}
if (mfn < entry->mfn)
n = n->rb_left;
else
n = n->rb_right;
}
read_unlock_irqrestore(&p2m_lock, irqflags);
return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__mfn_to_pfn);
bool __set_phys_to_machine_multi(unsigned long pfn,
unsigned long mfn, unsigned long nr_pages)
{
int rc;
unsigned long irqflags;
struct xen_p2m_entry *p2m_entry;
struct rb_node *n = phys_to_mach.rb_node;
if (mfn == INVALID_P2M_ENTRY) {
write_lock_irqsave(&p2m_lock, irqflags);
while (n) {
p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
if (p2m_entry->pfn <= pfn &&
p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
write_unlock_irqrestore(&p2m_lock, irqflags);
kfree(p2m_entry);
return true;
}
if (pfn < p2m_entry->pfn)
n = n->rb_left;
else
n = n->rb_right;
}
write_unlock_irqrestore(&p2m_lock, irqflags);
return true;
}
p2m_entry = kzalloc(sizeof(struct xen_p2m_entry), GFP_NOWAIT);
if (!p2m_entry) {
pr_warn("cannot allocate xen_p2m_entry\n");
return false;
}
p2m_entry->pfn = pfn;
p2m_entry->nr_pages = nr_pages;
p2m_entry->mfn = mfn;
write_lock_irqsave(&p2m_lock, irqflags);
if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) ||
(rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) {
write_unlock_irqrestore(&p2m_lock, irqflags);
return false;
}
write_unlock_irqrestore(&p2m_lock, irqflags);
return true;
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
return __set_phys_to_machine_multi(pfn, mfn, 1);
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine);
int p2m_init(void)
{
rwlock_init(&p2m_lock);
return 0;
}
arch_initcall(p2m_init);
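A hedged usage sketch of the interface this file exports (the pfn/mfn values are invented for illustration):

/* Record that guest frame 0x80000 is backed by machine frame 0x90000,
 * as gnttab_map_refs() now does for autotranslated guests... */
if (!__set_phys_to_machine_multi(0x80000, 0x90000, 1))
	pr_warn("p2m insert failed\n");
BUG_ON(__pfn_to_mfn(0x80000) != 0x90000);	/* rbtree range lookup */
/* ...and passing INVALID_P2M_ENTRY erases the entry again on unmap. */
__set_phys_to_machine(0x80000, INVALID_P2M_ENTRY);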
arch/arm64/Kconfig
@@ -211,6 +211,7 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM64 (EXPERIMENTAL)"
depends on ARM64 && OF
select SWIOTLB_XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
arch/arm64/include/asm/dma-mapping.h
@@ -23,11 +23,15 @@
#include <asm-generic/dma-coherent.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
if (unlikely(!dev) || !dev->archdata.dma_ops)
return dma_ops;
@@ -35,6 +39,14 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return dev->archdata.dma_ops;
}
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
if (xen_initial_domain())
return xen_dma_ops;
else
return __generic_dma_ops(dev);
}
#include <asm-generic/dma-mapping-common.h>
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
arch/arm64/include/asm/io.h
@@ -22,11 +22,14 @@
#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm/pgtable.h>
#include <xen/xen.h>
/*
* Generic IO read/write. These perform native-endian accesses.
*/
@@ -263,5 +266,11 @@ extern int devmem_is_allowed(unsigned long pfn);
*/
#define xlate_dev_kmem_ptr(p) p
extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
const struct bio_vec *vec2);
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
(!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
#endif /* __KERNEL__ */
#endif /* __ASM_IO_H */
arch/arm64/include/asm/xen/page-coherent.h (new file)
#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
#define _ASM_ARM64_XEN_PAGE_COHERENT_H
#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
}
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
}
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
}
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
}
#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
arch/arm64/xen/Makefile
-xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o)
+xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
obj-y := xen-arm.o hypercall.o
arch/ia64/include/asm/xen/page-coherent.h (new file)
#ifndef _ASM_IA64_XEN_PAGE_COHERENT_H
#define _ASM_IA64_XEN_PAGE_COHERENT_H
#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
void *vstart = (void*)__get_free_pages(flags, get_order(size));
*dma_handle = virt_to_phys(vstart);
return vstart;
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
free_pages((unsigned long) cpu_addr, get_order(size));
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs) { }
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs) { }
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */
arch/x86/include/asm/xen/page-coherent.h (new file)
#ifndef _ASM_X86_XEN_PAGE_COHERENT_H
#define _ASM_X86_XEN_PAGE_COHERENT_H
#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
void *vstart = (void*)__get_free_pages(flags, get_order(size));
*dma_handle = virt_to_phys(vstart);
return vstart;
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
free_pages((unsigned long) cpu_addr, get_order(size));
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs) { }
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs) { }
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
#endif /* _ASM_X86_XEN_PAGE_COHERENT_H */
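The ia64 and x86 variants keep only the allocation helpers and stub out the map/unmap/sync hooks: DMA on those architectures is cache-coherent, so plain __get_free_pages() plus virt_to_phys() is sufficient and no per-mapping cache maintenance is needed.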
arch/x86/xen/mmu.c
@@ -2328,12 +2328,14 @@ static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
return success;
}
int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
unsigned int address_bits)
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle)
{
unsigned long *in_frames = discontig_frames, out_frame;
unsigned long flags;
int success;
unsigned long vstart = (unsigned long)phys_to_virt(pstart);
/*
* Currently an auto-translated guest will not perform I/O, nor will
@@ -2368,15 +2370,17 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
spin_unlock_irqrestore(&xen_reservation_lock, flags);
*dma_handle = virt_to_machine(vstart).maddr;
return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
unsigned long *out_frames = discontig_frames, in_frame;
unsigned long flags;
int success;
unsigned long vstart;
if (xen_feature(XENFEAT_auto_translated_physmap))
return;
@@ -2384,6 +2388,7 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
if (unlikely(order > MAX_CONTIG_ORDER))
return;
vstart = (unsigned long)phys_to_virt(pstart);
memset((void *) vstart, 0, PAGE_SIZE << order);
spin_lock_irqsave(&xen_reservation_lock, flags);
arch/x86/xen/p2m.c
@@ -799,10 +799,10 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
unsigned topidx, mididx, idx;
if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
/* don't track P2M changes in autotranslate guests */
if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
return true;
}
if (unlikely(pfn >= MAX_P2M_PFN)) {
BUG_ON(mfn != INVALID_P2M_ENTRY);
return true;
drivers/xen/Kconfig
@@ -140,7 +140,6 @@ config XEN_GRANT_DEV_ALLOC
config SWIOTLB_XEN
def_bool y
depends on PCI && X86
select SWIOTLB
config XEN_TMEM
drivers/xen/grant-table.c
@@ -49,6 +49,7 @@
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
@@ -898,8 +899,16 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
&map_ops[i].status, __func__);
if (xen_feature(XENFEAT_auto_translated_physmap))
/* this is basically a nop on x86 */
if (xen_feature(XENFEAT_auto_translated_physmap)) {
for (i = 0; i < count; i++) {
if (map_ops[i].status)
continue;
set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
map_ops[i].dev_bus_addr >> PAGE_SHIFT);
}
return ret;
}
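/* On ARM the set_phys_to_machine() call above records host_addr ->
 * dev_bus_addr in the new p2m rbtree so swiotlb-xen can translate the
 * grant-mapped page later; on x86 autotranslated guests it is a nop. */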
if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
@@ -942,8 +951,14 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
if (ret)
return ret;
if (xen_feature(XENFEAT_auto_translated_physmap))
/* this is basically a nop on x86 */
if (xen_feature(XENFEAT_auto_translated_physmap)) {
for (i = 0; i < count; i++) {
set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
INVALID_P2M_ENTRY);
}
return ret;
}
if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
drivers/xen/swiotlb-xen.c
@@ -43,6 +43,9 @@
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>
#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>
#include <trace/events/swiotlb.h>
/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
@@ -50,6 +53,20 @@
* API.
*/
#ifndef CONFIG_X86
static unsigned long dma_alloc_coherent_mask(struct device *dev,
gfp_t gfp)
{
unsigned long dma_mask = 0;
dma_mask = dev->coherent_dma_mask;
if (!dma_mask)
dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
return dma_mask;
}
#endif
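/* x86 already provides dma_alloc_coherent_mask() in <asm/dma-mapping.h>;
 * the fallback above gives other architectures the same policy: honour
 * the device's coherent mask and default to 24-bit for GFP_DMA
 * allocations, 32-bit otherwise. */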
static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
@@ -58,17 +75,17 @@ static unsigned long xen_io_tlb_nslabs;
static u64 start_dma_addr;
static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
return phys_to_machine(XPADDR(paddr)).maddr;
}
static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
return machine_to_phys(XMADDR(baddr)).paddr;
}
static dma_addr_t xen_virt_to_bus(void *address)
static inline dma_addr_t xen_virt_to_bus(void *address)
{
return xen_phys_to_bus(virt_to_phys(address));
}
@@ -91,7 +108,7 @@ static int check_pages_physically_contiguous(unsigned long pfn,
return 1;
}
static int range_straddles_page_boundary(phys_addr_t p, size_t size)
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
unsigned long pfn = PFN_DOWN(p);
unsigned int offset = p & ~PAGE_MASK;
@@ -128,6 +145,8 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
int i, rc;
int dma_bits;
dma_addr_t dma_handle;
phys_addr_t p = virt_to_phys(buf);
dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
@@ -137,9 +156,9 @@ xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
do {
rc = xen_create_contiguous_region(
(unsigned long)buf + (i << IO_TLB_SHIFT),
p + (i << IO_TLB_SHIFT),
get_order(slabs << IO_TLB_SHIFT),
dma_bits);
dma_bits, &dma_handle);
} while (rc && dma_bits++ < max_dma_bits);
if (rc)
return rc;
@@ -265,7 +284,6 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
void *ret;
int order = get_order(size);
u64 dma_mask = DMA_BIT_MASK(32);
unsigned long vstart;
phys_addr_t phys;
dma_addr_t dev_addr;
@@ -280,8 +298,12 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
return ret;
vstart = __get_free_pages(flags, order);
ret = (void *)vstart;
/* On ARM this function returns an ioremap'ped virtual address for
* which virt_to_phys doesn't return the corresponding physical
* address. In fact on ARM virt_to_phys only works for kernel direct
* mapped RAM memory. Also see comment below.
*/
ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);
if (!ret)
return ret;
@@ -289,18 +311,21 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = dma_alloc_coherent_mask(hwdev, flags);
phys = virt_to_phys(ret);
/* At this point dma_handle is the physical address, next we are
* going to set it to the machine address.
* Do not use virt_to_phys(ret) because on ARM it doesn't correspond
* to *dma_handle. */
phys = *dma_handle;
dev_addr = xen_phys_to_bus(phys);
if (((dev_addr + size - 1 <= dma_mask)) &&
!range_straddles_page_boundary(phys, size))
*dma_handle = dev_addr;
else {
if (xen_create_contiguous_region(vstart, order,
fls64(dma_mask)) != 0) {
free_pages(vstart, order);
if (xen_create_contiguous_region(phys, order,
fls64(dma_mask), dma_handle) != 0) {
xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
return NULL;
}
*dma_handle = virt_to_machine(ret).maddr;
}
memset(ret, 0, size);
return ret;
@@ -321,13 +346,15 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = hwdev->coherent_dma_mask;
phys = virt_to_phys(vaddr);
/* do not use virt_to_phys because on ARM it doesn't return you the
* physical address */
phys = xen_bus_to_phys(dev_addr);
if (((dev_addr + size - 1 > dma_mask)) ||
range_straddles_page_boundary(phys, size))
xen_destroy_contiguous_region((unsigned long)vaddr, order);
xen_destroy_contiguous_region(phys, order);
free_pages((unsigned long)vaddr, order);
xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
@@ -354,8 +381,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* buffering it.
*/
if (dma_capable(dev, dev_addr, size) &&
!range_straddles_page_boundary(phys, size) && !swiotlb_force)
!range_straddles_page_boundary(phys, size) && !swiotlb_force) {
/* we are not interested in the dma_addr returned by
* xen_dma_map_page, only in the potential cache flushes executed
* by the function. */
xen_dma_map_page(dev, page, offset, size, dir, attrs);
return dev_addr;
}
/*
* Oh well, have to allocate and map a bounce buffer.
@@ -366,6 +398,8 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
if (map == SWIOTLB_MAP_ERROR)
return DMA_ERROR_CODE;
xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
map & ~PAGE_MASK, size, dir, attrs);
dev_addr = xen_phys_to_bus(map);
/*
@@ -388,12 +422,15 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
* whatever the device wrote there.
*/
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir)
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
phys_addr_t paddr = xen_bus_to_phys(dev_addr);
BUG_ON(dir == DMA_NONE);
xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr)) {
swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
@@ -416,7 +453,7 @@ void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
xen_unmap_single(hwdev, dev_addr, size, dir);
xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
@@ -439,11 +476,15 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
BUG_ON(dir == DMA_NONE);
if (target == SYNC_FOR_CPU)
xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
/* NOTE: We use dev_addr here, not paddr! */
if (is_xen_swiotlb_buffer(dev_addr)) {
if (is_xen_swiotlb_buffer(dev_addr))
swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
return;
}
if (target == SYNC_FOR_DEVICE)
xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);
if (dir != DMA_FROM_DEVICE)
return;
@@ -506,16 +547,26 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
sg->length,
dir);
if (map == SWIOTLB_MAP_ERROR) {
dev_warn(hwdev, "swiotlb buffer is full\n");
/* Don't panic here, we expect map_sg users
to do proper error handling. */
xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
attrs);
sg_dma_len(sgl) = 0;
return DMA_ERROR_CODE;
return 0;
}
sg->dma_address = xen_phys_to_bus(map);
} else
} else {
/* we are not interested in the dma_addr returned by
* xen_dma_map_page, only in the potential cache flushes executed
* by the function. */
xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
paddr & ~PAGE_MASK,
sg->length,
dir,
attrs);
sg->dma_address = dev_addr;
}
sg_dma_len(sg) = sg->length;
}
return nelems;
@@ -537,7 +588,7 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
@@ -597,3 +648,15 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
int
xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
return -EIO;
*dev->dma_mask = dma_mask;
return 0;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
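/* On ARM dma_set_mask() dispatches through the device's dma_map_ops, so
 * with xen_dma_ops installed this hook validates the requested mask
 * against the swiotlb window before accepting it. */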
include/xen/swiotlb-xen.h
@@ -56,4 +56,6 @@ xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
extern int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
extern int
xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
#endif /* __LINUX_SWIOTLB_XEN_H */
include/xen/xen-ops.h
@@ -19,10 +19,11 @@ void xen_arch_resume(void);
int xen_setup_shutdown_event(void);
extern unsigned long *xen_contiguous_bitmap;
int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
unsigned int address_bits);
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle);
void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
struct vm_area_struct;
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
......
lib/swiotlb.c
@@ -505,6 +505,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
dev_warn(hwdev, "swiotlb buffer is full\n");
return SWIOTLB_MAP_ERROR;
found:
spin_unlock_irqrestore(&io_tlb_lock, flags);