/*
 * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 */

#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
};

static void dmam_coherent_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
}

static void dmam_noncoherent_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_alloc_coherent - Managed dma_alloc_coherent()
 * @dev: Device to allocate coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_coherent().  Memory allocated using this function
 * will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_coherent);
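
/*
 * Example: typical use from a probe path (a sketch; the foo_* names
 * are hypothetical).  No matching free is needed, devres releases the
 * buffer automatically when the driver detaches.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		priv->ring = dmam_alloc_coherent(&pdev->dev, FOO_RING_BYTES,
 *						 &priv->ring_dma, GFP_KERNEL);
 *		if (!priv->ring)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */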

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match,
			       &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);
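
/*
 * Example (sketch, hypothetical names): a driver that must drop a
 * managed buffer early, e.g. to reallocate a larger ring at runtime,
 * frees it explicitly; the devres entry is destroyed along with it,
 * so nothing is double-freed on detach.
 *
 *	dmam_free_coherent(dev, old_bytes, old_ring, old_ring_dma);
 *	new_ring = dmam_alloc_coherent(dev, new_bytes, &new_ring_dma,
 *				       GFP_KERNEL);
 */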

/**
 * dmam_alloc_noncoherent - Managed dma_alloc_noncoherent()
 * @dev: Device to allocate noncoherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_noncoherent().  Memory allocated using this
 * function will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_noncoherent(struct device *dev, size_t size,
			     dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_noncoherent);
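
/*
 * Example (sketch; FOO_BUF_BYTES is hypothetical): unlike the coherent
 * variant, this memory may be cached, so the CPU must sync it around
 * device accesses, typically with dma_cache_sync().
 *
 *	buf = dmam_alloc_noncoherent(dev, FOO_BUF_BYTES, &buf_dma,
 *				     GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	memset(buf, 0, FOO_BUF_BYTES);
 *	dma_cache_sync(dev, buf, FOO_BUF_BYTES, DMA_TO_DEVICE);
 *	(now point the device at buf_dma)
 */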

/**
 * dmam_free_noncoherent - Managed dma_free_noncoherent()
 * @dev: Device to free noncoherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_noncoherent().
 */
void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
			   dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_noncoherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_noncoherent_release, dmam_match,
			       &match_data));
}
EXPORT_SYMBOL(dmam_free_noncoherent);

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT

static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}

/**
 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
 * @dev: Device to declare coherent memory for
 * @phys_addr: Physical address of coherent memory to be declared
 * @device_addr: Device address of coherent memory to be declared
 * @size: Size of coherent memory to be declared
 * @flags: Flags
 *
 * Managed dma_declare_coherent_memory().
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				 dma_addr_t device_addr, size_t size, int flags)
{
	void *res;
	int rc;

	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
					 flags);
	if (rc) {
		devres_add(dev, res);
		rc = 0;
	} else {
		devres_free(res);
		rc = -ENOMEM;
	}

	return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);
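
/*
 * Example (sketch; resource lookup elided, flag choice illustrative):
 * a driver steering dma_alloc_coherent() for its device to a chunk of
 * device-local SRAM described by a platform resource.
 *
 *	rc = dmam_declare_coherent_memory(&pdev->dev, res->start,
 *					  res->start, resource_size(res),
 *					  DMA_MEMORY_MAP |
 *					  DMA_MEMORY_EXCLUSIVE);
 *	if (rc)
 *		return rc;
 */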

/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory().
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory().
 */
void dmam_release_declared_memory(struct device *dev)
{
	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);

#endif

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
EXPORT_SYMBOL(dma_common_get_sgtable);
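
/*
 * Example (sketch): drivers normally reach this helper through
 * dma_get_sgtable(), e.g. when exporting a coherent buffer through
 * dma-buf.  The caller owns the resulting table and must free it.
 *
 *	struct sg_table sgt;
 *
 *	rc = dma_get_sgtable(dev, &sgt, cpu_addr, dma_handle, size);
 *	if (rc)
 *		return rc;
 *	...
 *	sg_free_table(&sgt);
 */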

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP)
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU && !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */

	return ret;
}
EXPORT_SYMBOL(dma_common_mmap);
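
/*
 * Example (sketch; foo_* names are hypothetical): drivers normally
 * reach this helper through dma_mmap_coherent() from their ->mmap
 * file operation.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->vaddr,
 *					 priv->dma_handle, priv->size);
 *	}
 */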

#ifdef CONFIG_MMU
/*
 * remaps an array of PAGE_SIZE pages into another vm_area
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
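
/*
 * Example (sketch; the vm_flags and pgprot shown are illustrative):
 * an arch or IOMMU allocator that gathered physically discontiguous
 * pages can give the CPU one contiguous view of them.
 *
 *	addr = dma_common_pages_remap(pages, size, VM_USERMAP,
 *				      pgprot_writecombine(PAGE_KERNEL),
 *				      __builtin_return_address(0));
 *	if (!addr)
 *		return NULL;
 */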

/*
 * remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts
 */

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller)
{
	int i;
	struct page **pages;
	void *ptr;
	unsigned long pfn;

	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++)
		pages[i] = pfn_to_page(pfn + i);

	ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);

	kfree(pages);

	return ptr;
}

/*
 * unmaps a range previously mapped by dma_common_*_remap
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}
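
/*
 * Example (sketch): the remap and free helpers pair up, and the
 * vm_flags passed at free time must match those used when mapping.
 *
 *	addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
 *					   __builtin_return_address(0));
 *	...
 *	dma_common_free_remap(addr, size, VM_USERMAP);
 */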
#endif

/*
 * Common configuration to enable DMA API use for a device
 */
int dma_configure(struct device *dev)
{
	struct device *bridge = NULL, *dma_dev = dev;
	enum dev_dma_attr attr;
	int ret = 0;

	if (dev_is_pci(dev)) {
		bridge = pci_get_host_bridge_device(to_pci_dev(dev));
		dma_dev = bridge;
		if (IS_ENABLED(CONFIG_OF) && dma_dev->parent &&
		    dma_dev->parent->of_node)
			dma_dev = dma_dev->parent;
	}

	if (dma_dev->of_node) {
		ret = of_dma_configure(dev, dma_dev->of_node);
	} else if (has_acpi_companion(dma_dev)) {
		attr = acpi_get_dma_attr(to_acpi_device_node(dma_dev->fwnode));
		if (attr != DEV_DMA_NOT_SUPPORTED)
			acpi_dma_configure(dev, attr);
	}

	if (bridge)
		pci_put_host_bridge_device(bridge);

	return ret;
}

void dma_deconfigure(struct device *dev)
{
	of_dma_deconfigure(dev);
	acpi_dma_deconfigure(dev);
}
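
/*
 * Example (sketch): the driver core brackets a device bind with these
 * calls, roughly as follows.
 *
 *	ret = dma_configure(dev);
 *	if (ret)
 *		goto probe_failed;
 *	ret = drv->probe(dev);
 *	...
 *	(and dma_deconfigure(dev) once the device is unbound)
 */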