Commit 92826e96 authored by Christoph Hellwig

dma-direct: don't fail on highmem CMA pages in dma_direct_alloc_pages

When dma_direct_alloc_pages encounters a highmem page, it currently just
gives up.  What it should do instead is fall back to the normal page
allocator - without this, platforms with a global highmem CMA pool fail
all dma_alloc_pages allocations.

Fixes: efa70f2f ("dma-mapping: add a new dma_alloc_pages API")
Reported-by: Mark O'Neill <mao@tumblingdice.co.uk>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Parent 566fb90e
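Before the full diff below, here is a condensed, annotated sketch of the
resulting allocation path. It mirrors the hunks that follow but is not a
standalone translation unit: all helpers (dma_alloc_contiguous,
dma_coherent_ok, dma_free_contiguous, alloc_pages_node, etc.) are
kernel-internal, and the trailing retry/zeroing logic is elided.

    /* Sketch only -- condensed from the diff below, kernel context assumed. */
    static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                    gfp_t gfp, bool allow_highmem)
    {
            struct page *page;

            /* First try the contiguous (CMA) allocator. */
            page = dma_alloc_contiguous(dev, size, gfp);
            if (page) {
                    /*
                     * Discard the CMA page if the device cannot address it,
                     * or if it is a highmem page and the caller cannot take
                     * one (dma_direct_alloc_pages now passes allow_highmem
                     * = false here instead of failing later on).
                     */
                    if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
                        (!allow_highmem && PageHighMem(page))) {
                            dma_free_contiguous(dev, page, size);
                            page = NULL;
                    }
            }
            /* With page == NULL we fall through to the page allocator. */
            if (!page)
                    page = alloc_pages_node(dev_to_node(dev), gfp,
                                            get_order(size));
            /* ... remaining retry and coherency checks elided ... */
            return page;
    }

The design point: rejecting an unsuitable CMA page inside the helper lets
the existing alloc_pages_node() fallback run, rather than bailing out in
dma_direct_alloc_pages() after the fact.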
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -115,7 +115,7 @@ static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
 }
 
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
-		gfp_t gfp)
+		gfp_t gfp, bool allow_highmem)
 {
 	int node = dev_to_node(dev);
 	struct page *page = NULL;
@@ -129,9 +129,12 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 					   &phys_limit);
 	page = dma_alloc_contiguous(dev, size, gfp);
-	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-		dma_free_contiguous(dev, page, size);
-		page = NULL;
+	if (page) {
+		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
+		    (!allow_highmem && PageHighMem(page))) {
+			dma_free_contiguous(dev, page, size);
+			page = NULL;
+		}
 	}
 again:
 	if (!page)
@@ -189,7 +192,7 @@ static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
 {
 	struct page *page;
 
-	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
 	if (!page)
 		return NULL;
 
@@ -262,7 +265,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
 	/* we always manually zero the memory once we are done */
-	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
 	if (!page)
 		return NULL;
 
@@ -370,19 +373,9 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
 	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
 		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
-	page = __dma_direct_alloc_pages(dev, size, gfp);
+	page = __dma_direct_alloc_pages(dev, size, gfp, false);
 	if (!page)
 		return NULL;
-	if (PageHighMem(page)) {
-		/*
-		 * Depending on the cma= arguments and per-arch setup
-		 * dma_alloc_contiguous could return highmem pages.
-		 * Without remapping there is no way to return them here,
-		 * so log an error and fail.
-		 */
-		dev_info(dev, "Rejecting highmem page from CMA.\n");
-		goto out_free_pages;
-	}
 
 	ret = page_address(page);
 	if (dma_set_decrypted(dev, ret, size))
...