// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;

static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
	else
		return mem->device_base;
}

static int dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	int ret;

	if (!size) {
		ret = -EINVAL;
		goto out;
	}

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base) {
		ret = -EINVAL;
		goto out;
	}
	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem) {
		ret = -ENOMEM;
		goto out;
	}
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return 0;

out:
	kfree(dma_mem);
	if (mem_base)
		memunmap(mem_base);
	return ret;
}

static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}

static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;
	int ret;

	ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
	if (ret)
		return ret;

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
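
/*
 * Usage sketch: a platform driver with device-local memory can publish it as
 * a coherent pool from its probe() routine.  foo_probe() and the resource
 * index below are illustrative placeholders, not taken from real drivers.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		int ret;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 *		if (!res)
 *			return -ENODEV;
 *
 *		ret = dma_declare_coherent_memory(&pdev->dev, res->start,
 *						  res->start,
 *						  resource_size(res),
 *						  DMA_MEMORY_EXCLUSIVE);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */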

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
		ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	ret = mem->virt_base + (pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;
err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}

/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		of the allocated area.
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
	if (*ret)
		return 1;

	/*
	 * In the case where the allocation cannot be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
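
/*
 * Usage sketch: a per-arch dma_alloc_coherent() implementation is expected
 * to try the per-device pool first and fall back to its generic allocator
 * only when this helper returns 0.  arch_dma_alloc() and
 * alloc_from_generic_pool() below are illustrative placeholders, not real
 * kernel symbols; note that vaddr may still be NULL when an exclusive pool
 * is exhausted.
 *
 *	void *arch_dma_alloc(struct device *dev, size_t size,
 *			     dma_addr_t *dma_handle, gfp_t gfp)
 *	{
 *		void *vaddr;
 *
 *		if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &vaddr))
 *			return vaddr;
 *
 *		return alloc_from_generic_pool(dev, size, dma_handle, gfp);
 *	}
 */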

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
			dma_handle);
}

static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}

/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
EXPORT_SYMBOL(dma_release_from_dev_coherent);
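
/*
 * Usage sketch: the matching free path in a per-arch dma_free_coherent()
 * mirrors the allocation side.  arch_dma_free() and free_to_generic_pool()
 * are illustrative placeholders, not real kernel symbols.
 *
 *	void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 *			   dma_addr_t dma_handle)
 *	{
 *		if (dma_release_from_dev_coherent(dev, get_order(size), vaddr))
 *			return;
 *
 *		free_to_generic_pool(dev, size, vaddr, dma_handle);
 *	}
 */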

int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
			vaddr);
}

static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}

/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
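
/*
 * Usage sketch: an arch dma_mmap() implementation checks the per-device pool
 * first and returns @ret when the buffer came from it.  arch_dma_mmap() and
 * generic_dma_mmap() are illustrative placeholders, not real kernel symbols.
 *
 *	int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
 *	{
 *		int ret;
 *
 *		if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *			return ret;
 *
 *		return generic_dma_mmap(dev, vma, cpu_addr, dma_addr, size);
 *	}
 */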

int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				   size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
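
/*
 * Illustrative device-tree snippet (addresses, sizes and labels are made-up
 * placeholders): a reserved-memory node like the one below is matched by the
 * "shared-dma-pool" compatible declared at the bottom of this file, and a
 * device that references it via "memory-region" gets its coherent pool set
 * up through rmem_dma_device_init() below when of_reserved_mem_device_init()
 * runs for that device.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@60000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x60000000 0x400000>;
 *			no-map;
 *		};
 *	};
 *
 *	foo@40000000 {
 *		...
 *		memory-region = <&dma_pool>;
 *	};
 */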

static struct reserved_mem *dma_reserved_default_memory __initdata;

static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;
	int ret;

	if (!mem) {
		ret = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size,
					       DMA_MEMORY_EXCLUSIVE, &mem);
		if (ret) {
			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
				&rmem->base, (unsigned long)rmem->size / SZ_1M);
			return ret;
		}
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}

static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}

	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

static int __init dma_init_reserved_memory(void)
{
	const struct reserved_mem_ops *ops;
	int ret;

	if (!dma_reserved_default_memory)
		return -ENOMEM;

	ops = dma_reserved_default_memory->ops;

	/*
	 * We rely on rmem_dma_device_init() not propagating the error from
	 * dma_assign_coherent_memory() when the device is NULL.
	 */
	ret = ops->device_init(dma_reserved_default_memory, NULL);

	if (!ret) {
		dma_coherent_default_memory = dma_reserved_default_memory->priv;
		pr_info("DMA: default coherent area is set\n");
	}

	return ret;
}

core_initcall(dma_init_reserved_memory);

RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif