Commit cefc53c7 authored by Joerg Roedel

iommu-api: Add iommu_map and iommu_unmap functions

These two functions provide support for mapping and unmapping physical
addresses to IO virtual addresses. They differ from the existing
iommu_(un)map_range() functions in that they take a gfp_order parameter
instead of a byte size, which makes it easier for IOMMU backend
implementations to detect whether a given range can be mapped with
larger page sizes. In the long term these new functions should replace
the old ones.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Parent 4abc14a7
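As context for the new interface (not part of the patch itself): the
gfp_order parameter encodes the mapping size as a power-of-two number of
4 KiB pages, so order 0 maps a single page and order 9 maps 2 MiB. A
minimal caller-side sketch, where the helper name and the domain/iova/paddr
setup are assumed for illustration only:

	#include <linux/iommu.h>
	#include <asm/page.h>			/* get_order() */

	/* Hypothetical helper: map one naturally aligned 2 MiB block.
	 * Assumes a 4 KiB PAGE_SIZE, matching the 0x1000 used in the patch. */
	static int example_map_2m(struct iommu_domain *domain,
				  unsigned long iova, phys_addr_t paddr)
	{
		size_t size  = 2UL * 1024 * 1024;	/* 2 MiB */
		int    order = get_order(size);		/* 9: 512 pages of 4 KiB */

		/* The older iommu_map_range() took the byte size instead. */
		return iommu_map(domain, iova, paddr, order,
				 IOMMU_READ | IOMMU_WRITE);
	}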
@@ -107,3 +107,34 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
	return iommu_ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, int gfp_order, int prot)
{
	unsigned long invalid_mask;
	size_t size;

	/* gfp_order counts 4 KiB pages, so the mapped size is 4 KiB << order. */
	size         = 0x1000UL << gfp_order;
	invalid_mask = size - 1;

	/* Both addresses must be naturally aligned to the mapped size. */
	BUG_ON((iova | paddr) & invalid_mask);

	return iommu_ops->map_range(domain, iova, paddr, size, prot);
}
EXPORT_SYMBOL_GPL(iommu_map);

int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
{
	unsigned long invalid_mask;
	size_t size;

	size         = 0x1000UL << gfp_order;
	invalid_mask = size - 1;

	BUG_ON(iova & invalid_mask);

	iommu_ops->unmap_range(domain, iova, size);

	return gfp_order;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
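For reference (and not part of the patch): both wrappers require iova and
paddr to be naturally aligned to the mapped size, which the BUG_ON()
checks enforce, and iommu_unmap() returns the order it unmapped, here
simply the requested order. A hedged sketch of the expected round trip,
with domain, iova and paddr assumed to be set up elsewhere:

	int order = 0;		/* one 4 KiB page */
	int ret;

	ret = iommu_map(domain, iova, paddr, order, IOMMU_READ);
	if (ret)
		return ret;

	/* ... use the mapping ... */

	order = iommu_unmap(domain, iova, order);	/* returns the unmapped order */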
@@ -60,6 +60,10 @@ extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
			   phys_addr_t paddr, size_t size, int prot);
extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
			      size_t size);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, int gfp_order, int prot);
extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
int gfp_order);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
				      unsigned long iova);
extern int iommu_domain_has_cap(struct iommu_domain *domain,
@@ -108,6 +112,18 @@ static inline void iommu_unmap_range(struct iommu_domain *domain,
{
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, int gfp_order, int prot)
{
return -ENODEV;
}

static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
int gfp_order)
{
return -ENODEV;
}
static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
					     unsigned long iova)
{
...