Commit 97ef952a authored by Marek Szyprowski

ARM: dma-mapping: add support for DMA_ATTR_SKIP_CPU_SYNC attribute

This patch adds support for the DMA_ATTR_SKIP_CPU_SYNC attribute in the
dma_(un)map_(single,page,sg) family of functions. It lets DMA mapping clients
create a mapping for a buffer for a given device without performing CPU cache
synchronization. The synchronization can be skipped for buffers that are known
to already be in the 'device' domain (the CPU caches have already been
synchronized, or only coherent mappings of the buffer exist). For advanced
users only; please use it with care.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Kyungmin Park <kyungmin.park@samsung.com>
Parent: bdf5e487
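For context, a minimal sketch of how a client would request this behaviour
through the dma_attrs API of this kernel generation; the device pointer dev,
the buffer buf, and BUF_SIZE are illustrative placeholders, not part of the
patch:

    #include <linux/dma-attrs.h>
    #include <linux/dma-mapping.h>

    dma_addr_t handle;
    DEFINE_DMA_ATTRS(attrs);        /* declares and zero-initialises the attribute set */

    /* The buffer is already in the 'device' domain (caches synchronized
     * earlier, or only coherent mappings exist), so ask the mapping layer
     * to skip the per-map cache maintenance. */
    dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

    handle = dma_map_single_attrs(dev, buf, BUF_SIZE, DMA_TO_DEVICE, &attrs);
    if (dma_mapping_error(dev, handle))
            return -ENOMEM;

The diff below wires the attribute check into both the plain ARM DMA ops and
the IOMMU-backed ops.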
@@ -73,7 +73,7 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
              unsigned long offset, size_t size, enum dma_data_direction dir,
              struct dma_attrs *attrs)
 {
-        if (!arch_is_coherent())
+        if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                 __dma_page_cpu_to_dev(page, offset, size, dir);
         return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
@@ -96,7 +96,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
                 size_t size, enum dma_data_direction dir,
                 struct dma_attrs *attrs)
 {
-        if (!arch_is_coherent())
+        if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                 __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
                                       handle & ~PAGE_MASK, size, dir);
 }
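With both arm_dma_map_page() and arm_dma_unmap_page() honouring the attribute,
a client that knows the device wrote only part of a buffer can replace the
full-buffer maintenance on unmap with an explicit partial sync. A hedged
sketch of that pattern, where handle, used, and BUF_SIZE are assumed driver
state rather than anything defined by the patch:

    DEFINE_DMA_ATTRS(attrs);

    dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

    /* Pull only the bytes the device actually produced back into the
     * CPU domain, then tear down the mapping without re-syncing the
     * whole buffer. */
    dma_sync_single_for_cpu(dev, handle, used, DMA_FROM_DEVICE);
    dma_unmap_single_attrs(dev, handle, BUF_SIZE, DMA_FROM_DEVICE, &attrs);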
@@ -1207,7 +1207,7 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
  */
 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                           size_t size, dma_addr_t *handle,
-                          enum dma_data_direction dir)
+                          enum dma_data_direction dir, struct dma_attrs *attrs)
 {
         struct dma_iommu_mapping *mapping = dev->archdata.mapping;
         dma_addr_t iova, iova_base;
@@ -1226,7 +1226,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                 phys_addr_t phys = page_to_phys(sg_page(s));
                 unsigned int len = PAGE_ALIGN(s->offset + s->length);

-                if (!arch_is_coherent())
+                if (!arch_is_coherent() &&
+                    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                         __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

                 ret = iommu_map(mapping->domain, iova, phys, len, 0);
@@ -1273,7 +1274,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
                         if (__map_sg_chunk(dev, start, size, &dma->dma_address,
-                            dir) < 0)
+                            dir, attrs) < 0)
                                 goto bad_mapping;

                         dma->dma_address += offset;
@@ -1286,7 +1287,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                 }
                 size += s->length;
         }
-        if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+        if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0)
                 goto bad_mapping;

         dma->dma_address += offset;
@@ -1320,7 +1321,8 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                 if (sg_dma_len(s))
                         __iommu_remove_mapping(dev, sg_dma_address(s),
                                                sg_dma_len(s));
-                if (!arch_is_coherent())
+                if (!arch_is_coherent() &&
+                    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                         __dma_page_dev_to_cpu(sg_page(s), s->offset,
                                               s->length, dir);
         }
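The scatter-gather paths accept the same attribute, which can help when a
scatterlist that has already been mapped and synchronized for one device is
mapped again for another. A sketch under that assumption; sgl and nents are
illustrative driver state:

    int count;
    DEFINE_DMA_ATTRS(attrs);

    dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

    /* The pages were synchronized when the first device mapped them;
     * skip the redundant cache walk for this second mapping. */
    count = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, &attrs);
    if (count == 0)
            return -ENOMEM;

    /* ... device uses the mapping ... */

    /* Unmap with the original nents, per the DMA API contract. */
    dma_unmap_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, &attrs);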
@@ -1382,7 +1384,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
         dma_addr_t dma_addr;
         int ret, len = PAGE_ALIGN(size + offset);

-        if (!arch_is_coherent())
+        if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                 __dma_page_cpu_to_dev(page, offset, size, dir);

         dma_addr = __alloc_iova(mapping, len);
@@ -1421,7 +1423,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
         if (!iova)
                 return;

-        if (!arch_is_coherent())
+        if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                 __dma_page_dev_to_cpu(page, offset, size, dir);

         iommu_unmap(mapping->domain, iova, len);