Commit ac6d7046 authored by Jean-Philippe Brucker, committed by Joerg Roedel

iommu/dma: Pass address limit rather than size to iommu_setup_dma_ops()

Passing a 64-bit address width to iommu_setup_dma_ops() is valid on
virtual platforms, but isn't currently possible. The overflow check in
iommu_dma_init_domain() prevents this even when @dma_base isn't 0. Pass
a limit address instead of a size, so callers don't have to fake a size
to work around the check.
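
The problem is easiest to see with concrete values: a full 64-bit address width cannot be expressed as a u64 size at all (it would need 2^64), and base + size wraps around to 0, so the old aperture check rejects a range the domain can actually cover. The standalone sketch below (plain userspace C with a made-up aperture, not kernel code) contrasts the old size-based check with the new limit-based one:

    /*
     * Minimal sketch of the aperture check, outside the kernel.
     * The aperture values are invented for the demonstration.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Old form: accept if the (base, size) range overlaps the aperture.
     * A full 64-bit range has no representable size, so size truncates
     * to 0, base + size wraps, and the range is wrongly rejected. */
    static bool old_check(uint64_t base, uint64_t size,
                          uint64_t ap_start, uint64_t ap_end)
    {
            return base <= ap_end && base + size > ap_start;
    }

    /* New form: accept if the (base, limit) range overlaps the aperture.
     * The inclusive limit needs no addition, so nothing can overflow. */
    static bool new_check(uint64_t base, uint64_t limit,
                          uint64_t ap_start, uint64_t ap_end)
    {
            return base <= ap_end && limit >= ap_start;
    }

    int main(void)
    {
            uint64_t ap_start = 0x1000, ap_end = 0xffffffffffffULL;

            /* A full 64-bit IOVA space: the size truncates to 0... */
            printf("old check: %s\n",
                   old_check(0, 0, ap_start, ap_end) ? "accepted" : "rejected");
            /* ...but the same space expressed as a limit is accepted. */
            printf("new check: %s\n",
                   new_check(0, UINT64_MAX, ap_start, ap_end) ? "accepted" : "rejected");
            return 0;
    }

Callers that still think in (base, size) terms convert at the call site with dma_base + size - 1, as the arm64 hunk below does; base 0 with a 4 GiB size, for instance, becomes the inclusive limit 0xffffffff.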

The base and limit parameters are being phased out, because:
* they are redundant for x86 callers. dma-iommu already reserves the
  first page, and the upper limit is already in domain->geometry.
* they can now be obtained from dev->dma_range_map on Arm.
But removing them on Arm isn't completely straightforward so is left for
future work. As an intermediate step, simplify the x86 callers by
passing dummy limits.
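
For the Arm side, the limit could eventually be derived from dev->dma_range_map, which describes the bus DMA windows as a table terminated by a zero-size entry. The sketch below only illustrates that derivation; the struct is a simplified stand-in for the kernel's struct bus_dma_region, and none of this is part of the patch itself:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's bus_dma_region entries
     * (assumption: only the DMA start and size matter for the limit). */
    struct dma_region {
            uint64_t dma_start;
            uint64_t size;          /* a zero size terminates the table */
    };

    /* Highest DMA address reachable according to the range map. */
    static uint64_t range_map_limit(const struct dma_region *map)
    {
            uint64_t limit = 0;

            if (!map)
                    return UINT64_MAX;      /* no map: no constraint described */
            for (; map->size; map++) {
                    uint64_t end = map->dma_start + map->size - 1;

                    if (end > limit)
                            limit = end;
            }
            return limit;
    }

    int main(void)
    {
            /* Example map: a single 4 GiB window starting at 2 GiB (made up). */
            struct dma_region map[] = {
                    { .dma_start = 0x80000000ULL, .size = 0x100000000ULL },
                    { 0 },
            };

            printf("limit = %#llx\n", (unsigned long long)range_map_limit(map));
            return 0;
    }

When no range map constrains the device, falling back to the full 64-bit limit matches the dummy U64_MAX the x86 callers now pass.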
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/20210618152059.1194210-5-jean-philippe@linaro.org
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Parent 3cf48554
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -50,7 +50,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 
 	dev->dma_coherent = coherent;
 	if (iommu)
-		iommu_setup_dma_ops(dev, dma_base, size);
+		iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
 
 #ifdef CONFIG_XEN
 	if (xen_swiotlb_detect())
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1713,7 +1713,7 @@ static void amd_iommu_probe_finalize(struct device *dev)
 	/* Domains are initialized for this device - have a look what we ended up with */
 	domain = iommu_get_domain_for_dev(dev);
 	if (domain->type == IOMMU_DOMAIN_DMA)
-		iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0);
+		iommu_setup_dma_ops(dev, 0, U64_MAX);
 	else
 		set_dma_ops(dev, NULL);
 }
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -319,16 +319,16 @@ static bool dev_is_untrusted(struct device *dev)
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
  * @base: IOVA at which the mappable address space starts
- * @size: Size of IOVA space
+ * @limit: Last address of the IOVA space
  * @dev: Device the domain is being initialised for
  *
- * @base and @size should be exact multiples of IOMMU page granularity to
+ * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
  * avoid rounding surprises. If necessary, we reserve the page at address 0
  * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
  * any change which could make prior IOVAs invalid will fail.
  */
 static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
-		u64 size, struct device *dev)
+		dma_addr_t limit, struct device *dev)
 {
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	unsigned long order, base_pfn;
@@ -346,7 +346,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	/* Check the domain allows at least some access to the device... */
 	if (domain->geometry.force_aperture) {
 		if (base > domain->geometry.aperture_end ||
-		    base + size <= domain->geometry.aperture_start) {
+		    limit < domain->geometry.aperture_start) {
 			pr_warn("specified DMA range outside IOMMU capability\n");
 			return -EFAULT;
 		}
@@ -1308,7 +1308,7 @@ static const struct dma_map_ops iommu_dma_ops = {
  * The IOMMU core code allocates the default DMA domain, which the underlying
  * IOMMU driver needs to support via the dma-iommu layer.
  */
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -1320,7 +1320,7 @@ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
 	 * underlying IOMMU driver needs to support via the dma-iommu layer.
 	 */
 	if (domain->type == IOMMU_DOMAIN_DMA) {
-		if (iommu_dma_init_domain(domain, dma_base, size, dev))
+		if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
 			goto out_err;
 		dev->dma_ops = &iommu_dma_ops;
 	}
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -5165,13 +5165,10 @@ static void intel_iommu_release_device(struct device *dev)
 
 static void intel_iommu_probe_finalize(struct device *dev)
 {
-	dma_addr_t base = IOVA_START_PFN << VTD_PAGE_SHIFT;
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
 
 	if (domain && domain->type == IOMMU_DOMAIN_DMA)
-		iommu_setup_dma_ops(dev, base,
-				    __DOMAIN_MAX_ADDR(dmar_domain->gaw) - base);
+		iommu_setup_dma_ops(dev, 0, U64_MAX);
 	else
 		set_dma_ops(dev, NULL);
 }
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -19,7 +19,7 @@ int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
 void iommu_put_dma_cookie(struct iommu_domain *domain);
 
 /* Setup call for arch DMA mapping code */
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
 
 /* The DMA API isn't _quite_ the whole story, though... */
@@ -50,7 +50,7 @@ struct msi_msg;
 struct device;
 
 static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
-		u64 size)
+		u64 dma_limit)
 {
 }