// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>

#define DIRECT_MAPPING_ERROR		0

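/*
 * Check that the bus address produced for this device fits within its DMA
 * mask; reject the mapping otherwise.
 */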
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
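		/* Only report the overflow for devices with a >= 32-bit mask */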
		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx\n",
				caller, &dma_addr, size, *dev->dma_mask);
		}
		return false;
	}
	return true;
}

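/*
 * Allocate a coherent buffer: prefer the CMA area when sleeping is allowed,
 * fall back to the page allocator, and hand back a zeroed buffer together
 * with its bus address.
 */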
static void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;

	/* CMA can only be used in a context that permits sleeping */
	if (gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
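	/* Fall back to a plain page allocation on the device's NUMA node */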
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
	if (!page)
		return NULL;

	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	memset(page_address(page), 0, size);
	return page_address(page);
}

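/*
 * Give the buffer back to CMA if it was allocated from there, otherwise
 * return the pages to the page allocator.
 */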
static void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, get_order(size));
}

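/*
 * Without an IOMMU a streaming mapping is simply the page's physical
 * address translated to a bus address, plus the offset into the page.
 */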
static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;
	return dma_addr;
}

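/*
 * Map each scatterlist segment individually; report failure (0 segments
 * mapped) as soon as one segment does not fit the device's DMA mask.
 */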
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

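/* Bus address 0 (DIRECT_MAPPING_ERROR) doubles as the error cookie */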
static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

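/*
 * Minimal dma_map_ops for the direct-mapped case; no unmap or sync
 * callbacks are needed as there is nothing to tear down or flush.
 */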
const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.mapping_error		= dma_direct_mapping_error,
};
EXPORT_SYMBOL(dma_direct_ops);