Commit 538d5b33 authored by Tomasz Nowicki, committed by Joerg Roedel

iommu/iova: Make rcache flush optional on IOVA allocation failure

Since IOVA allocation failure is not an unusual case, we need to flush the
CPUs' rcaches in the hope that we will succeed on the next round.

However, it is useful to be able to decide whether the rcache flush step is
needed, for two reasons:
- Scalability. On a large system with ~100 CPUs, iterating over and flushing
  the rcache of each CPU becomes a serious bottleneck, so we may want to defer it.
- free_cpu_cached_iovas() does not take the max PFN we are interested in into
  account. Thus we may flush our rcaches and still get no new IOVA, as in this
  commonly used scenario:

    if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
        iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift);

    if (!iova)
        iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift);

   1. The first alloc_iova_fast() call is limited to DMA_BIT_MASK(32) to get
      PCI devices a SAC address.
   2. alloc_iova() fails because the 32-bit space is full.
   3. The rcaches contain PFNs outside the 32-bit space, so free_cpu_cached_iovas()
      throws entries away for nothing, and alloc_iova() fails again.
   4. The next alloc_iova_fast() call cannot take advantage of the rcache, since
      we have just defeated the caches. In this case we proceed with the slowest
      option.

This patch reworks the flushed_rcache local flag into an additional function
argument that controls the rcache flush step, and updates all callers to
perform the flush only as a last resort.
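
In sketch form, the resulting caller pattern keeps the rcaches intact on the
cheap, range-limited first attempt and only permits the flush on the final
attempt (identifiers are taken from the dma-iommu.c hunk below):

    /* First try: stay below 32 bits and leave the per-CPU rcaches alone. */
    iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift, false);
    if (!iova)
        /* Last chance: let alloc_iova_fast() flush the rcaches internally. */
        iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);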
Signed-off-by: Tomasz Nowicki <Tomasz.Nowicki@caviumnetworks.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Tested-by: Nate Watterson <nwatters@codeaurora.org>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Parent 4d689b61
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1546,10 +1546,11 @@ static unsigned long dma_ops_alloc_iova(struct device *dev,
 	if (dma_mask > DMA_BIT_MASK(32))
 		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
-				      IOVA_PFN(DMA_BIT_MASK(32)));
+				      IOVA_PFN(DMA_BIT_MASK(32)), false);
 
 	if (!pfn)
-		pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
+		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
+				      IOVA_PFN(dma_mask), true);
 
 	return (pfn << PAGE_SHIFT);
 }
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -370,10 +370,12 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 	/* Try to get PCI devices a SAC address */
 	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
-		iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift);
+		iova = alloc_iova_fast(iovad, iova_len,
+				       DMA_BIT_MASK(32) >> shift, false);
 
 	if (!iova)
-		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift);
+		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
+				       true);
 
 	return (dma_addr_t)iova << shift;
 }
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3469,11 +3469,12 @@ static unsigned long intel_alloc_iova(struct device *dev,
 		 * from higher range
 		 */
 		iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
-					   IOVA_PFN(DMA_BIT_MASK(32)));
+					   IOVA_PFN(DMA_BIT_MASK(32)), false);
 		if (iova_pfn)
 			return iova_pfn;
 	}
-	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
+	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
+				   IOVA_PFN(dma_mask), true);
 	if (unlikely(!iova_pfn)) {
 		pr_err("Allocating %ld-page iova for %s failed",
 		       nrpages, dev_name(dev));
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -395,14 +395,15 @@ EXPORT_SYMBOL_GPL(free_iova);
  * @iovad: - iova domain in question
  * @size: - size of page frames to allocate
  * @limit_pfn: - max limit address
+ * @flush_rcache: - set to flush rcache on regular allocation failure
  * This function tries to satisfy an iova allocation from the rcache,
- * and falls back to regular allocation on failure.
+ * and falls back to regular allocation on failure. If regular allocation
+ * fails too and the flush_rcache flag is set then the rcache will be flushed.
 */
 unsigned long
 alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
-		unsigned long limit_pfn)
+		unsigned long limit_pfn, bool flush_rcache)
 {
-	bool flushed_rcache = false;
 	unsigned long iova_pfn;
 	struct iova *new_iova;
@@ -415,11 +416,11 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
 	if (!new_iova) {
 		unsigned int cpu;
 
-		if (flushed_rcache)
+		if (!flush_rcache)
 			return 0;
 
 		/* Try replenishing IOVAs by flushing rcache. */
-		flushed_rcache = true;
+		flush_rcache = false;
 		for_each_online_cpu(cpu)
 			free_cpu_cached_iovas(cpu, iovad);
 		goto retry;
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -150,7 +150,7 @@ void queue_iova(struct iova_domain *iovad,
 		unsigned long pfn, unsigned long pages,
 		unsigned long data);
 unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
-			      unsigned long limit_pfn);
+			      unsigned long limit_pfn, bool flush_rcache);
 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 	unsigned long pfn_hi);
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
@@ -212,7 +212,8 @@ static inline void queue_iova(struct iova_domain *iovad,
 static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
 					    unsigned long size,
-					    unsigned long limit_pfn)
+					    unsigned long limit_pfn,
+					    bool flush_rcache)
 {
 	return 0;
 }