// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>

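/* Error cookie returned by ->map_page and tested by ->mapping_error below. */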
#define DIRECT_MAPPING_ERROR		0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
	return sev_active();
}

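/*
 * Check that a produced bus address fits the device's DMA mask.  Overflow
 * is only reported for masks of at least 32 bits; devices with a smaller
 * mask are expected to run into unaddressable memory now and then.
 */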
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (!dev->dma_mask) {
			dev_err(dev,
				"%s: call on device without dma_mask\n",
				caller);
			return false;
		}

		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx\n",
				caller, &dma_addr, size, *dev->dma_mask);
		}
		return false;
	}
	return true;
}

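/*
 * Check whether a candidate allocation is addressable under the device's
 * coherent DMA mask.  When unencrypted DMA is forced (SEV), compare the
 * raw __phys_to_dma() address, which lacks the memory encryption bit that
 * phys_to_dma() would apply.
 */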
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t addr = force_dma_unencrypted() ?
		__phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
	return addr + size - 1 <= dev->coherent_dma_mask;
}

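/*
 * Allocate coherent memory: try the CMA area first, then the page
 * allocator, retrying from ever more restrictive zones until the result
 * fits dev->coherent_dma_mask.
 */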
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;
	void *ret;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;

	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		gfp |= GFP_DMA;
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order,
						 gfp & __GFP_NOWARN);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

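	/*
	 * The allocation ended up above the coherent mask: free it and
	 * retry from a lower zone, first ZONE_DMA32, then ZONE_DMA.
	 */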
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
		    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
		    !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	if (!page)
		return NULL;
	ret = page_address(page);
	if (force_dma_unencrypted()) {
		set_memory_decrypted((unsigned long)ret, 1 << page_order);
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);
	return ret;
}

/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int page_order = get_order(size);

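	/* Re-encrypt what dma_direct_alloc() decrypted before freeing it. */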
	if (force_dma_unencrypted())
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, page_order);
}

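/*
 * Mapping a page is plain address arithmetic plus the addressability
 * check; there is no IOMMU to program and no bounce buffering here.
 */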
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;
	return dma_addr;
}

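/*
 * Map each scatterlist segment individually.  The direct mapping never
 * merges segments, so on success the DMA segment count equals nents.
 */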
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

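/*
 * Decide whether a device with the given DMA mask can use the direct
 * mapping, based on which zones the page allocator can serve from and
 * on any addressing limit imposed by the bus.
 */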
int dma_direct_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_ZONE_DMA
	if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
		return 0;
#else
	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
		return 0;
#endif
	/*
	 * Upstream PCI/PCIe bridges or SoC interconnects may not carry
	 * as many DMA address bits as the device itself supports.
	 */
	if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
		return 0;
	return 1;
}

int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

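/*
 * The generic direct-mapped dma_map_ops.  An architecture without an
 * IOMMU can return these as its default ops; a minimal sketch, assuming
 * the usual asm/dma-mapping.h get_arch_dma_ops() hook:
 *
 *	const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 *	{
 *		return &dma_direct_ops;
 *	}
 */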
const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
};
EXPORT_SYMBOL(dma_direct_ops);