Commit 524a669b authored by Christoph Hellwig

iommu/vt-d: remove the mapping_error dma_map_ops method

Return DMA_MAPPING_ERROR instead of 0 on a dma mapping failure and let
the core dma-mapping code handle the rest.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 964f2311
@@ -3617,7 +3617,7 @@ static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
 
 	domain = get_valid_domain_for_dev(dev);
 	if (!domain)
-		return 0;
+		return DMA_MAPPING_ERROR;
 
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
@@ -3655,7 +3655,7 @@ static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
 		free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
 	pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
 		dev_name(dev), size, (unsigned long long)paddr, dir);
-	return 0;
+	return DMA_MAPPING_ERROR;
 }
 
 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
@@ -3756,7 +3756,7 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 
 	*dma_handle = __intel_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL,
 				       dev->coherent_dma_mask);
-	if (*dma_handle)
+	if (*dma_handle != DMA_MAPPING_ERROR)
 		return page_address(page);
 	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
 		__free_pages(page, order);
@@ -3865,11 +3865,6 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 	return nelems;
 }
 
-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return !dma_addr;
-}
-
 static const struct dma_map_ops intel_dma_ops = {
 	.alloc = intel_alloc_coherent,
 	.free = intel_free_coherent,
@@ -3877,7 +3872,6 @@ static const struct dma_map_ops intel_dma_ops = {
 	.unmap_sg = intel_unmap_sg,
 	.map_page = intel_map_page,
 	.unmap_page = intel_unmap_page,
-	.mapping_error = intel_mapping_error,
 	.dma_supported = dma_direct_supported,
 };
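
For context, here is a minimal sketch (not part of this patch) of what a driver-side caller looks like once every dma_map_ops implementation returns the common DMA_MAPPING_ERROR sentinel. The helper and variable names are hypothetical; only dma_map_page(), dma_mapping_error() and DMA_MAPPING_ERROR are real kernel interfaces.

#include <linux/dma-mapping.h>

/* Hypothetical helper, for illustration only. */
static int example_map_buffer(struct device *dev, struct page *page,
			      size_t size, dma_addr_t *handle)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);

	/*
	 * With this series, dma_mapping_error() in the core can simply
	 * compare the returned address against DMA_MAPPING_ERROR, so a
	 * per-ops ->mapping_error callback such as intel_mapping_error()
	 * above is no longer needed.
	 */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*handle = addr;
	return 0;
}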