/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device		*dev;
};

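/*
 * Per-buffer private state. For MMAP buffers, vaddr/dma_addr describe a
 * dma_alloc_coherent() allocation and dma_sgt stays NULL; for USERPTR
 * buffers, dma_sgt holds the pinned-and-mapped scatterlist and vma a
 * reference to the originating userspace mapping.
 */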
struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;

	/* USERPTR related */
	struct vm_area_struct		*vma;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/
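
/*
 * Call @cb on every struct page covered by @sgt, walking each scatterlist
 * entry page by page (a single entry may span several pages).
 */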
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}

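/*
 * Return the number of bytes, counted from the start of the DMA-mapped
 * scatterlist, that are contiguous in the DMA address space.
 */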
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

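/*
 * The "cookie" handed to drivers is a pointer to the buffer's bus address;
 * vb2_dma_contig_plane_dma_addr() in videobuf2-dma-contig.h dereferences it.
 */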
static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

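/*
 * Cache synchronization before/after hardware access. MMAP buffers come
 * from dma_alloc_coherent() and carry no sg table, so they are skipped;
 * only USERPTR buffers (buf->dma_sgt set) need the sync.
 */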
static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (!sgt)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (!sgt)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

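/* Drop one reference; the final put frees the coherent allocation. */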
static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	kfree(buf);
}
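
/*
 * Allocate a physically contiguous, DMA-coherent buffer of @size bytes and
 * set up the refcounted handler used by the common mmap vm_ops.
 */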
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %lu failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dev = dev;
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

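/* Map the coherent buffer into userspace via the common PFN-range helper. */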
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	return vb2_mmap_pfn_range(vma, buf->dma_addr, buf->size,
				  &vb2_common_vm_ops, &buf->handler);
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

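/* VM_IO/VM_PFNMAP mappings have no struct page backing. */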
static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

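/*
 * Collect the pages backing [start, start + n_pages * PAGE_SIZE): I/O
 * mappings are resolved pfn by pfn with follow_pfn(), anything else is
 * pinned with get_user_pages().
 */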
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

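/*
 * Undo vb2_dc_get_userptr(): unmap the scatterlist, dirty and release the
 * pinned pages (unless they came from an I/O mapping), and drop the vma
 * reference.
 */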
static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

	sg_free_table(sgt);
	kfree(sgt);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

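/*
 * Take over a userspace buffer: pin its pages (or look them up for I/O
 * mappings), build an sg table, map it for DMA and verify that the
 * resulting DMA addresses are contiguous for the full @size.
 */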
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

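/*
 * Drivers select this allocator by pointing their vb2_queue at
 * vb2_dma_contig_memops and handing out a context allocated here. A minimal
 * usage sketch, with hypothetical driver names (pdev, my_queue_setup):
 *
 *	void *ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *
 *	q->mem_ops = &vb2_dma_contig_memops;
 *
 *	static int my_queue_setup(struct vb2_queue *q, ...,
 *				  void *alloc_ctxs[])
 *	{
 *		alloc_ctxs[0] = ctx;	// one context per plane
 *		...
 *	}
 *
 *	vb2_dma_contig_cleanup_ctx(ctx);	// on driver teardown
 */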
void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");