/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

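/*
 * Per-buffer state for the dma-sg allocator. The buffer is described by an
 * array of page pointers and an sg_table built over them; vaddr is filled
 * in lazily when a kernel mapping is first requested, and offset records
 * the sub-page offset of a USERPTR buffer within its first page.
 */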
struct vb2_dma_sg_buf {
	void				*vaddr;
	struct page			**pages;
	int				write;
	int				offset;
	struct sg_table			sg_table;
	size_t				size;
	unsigned int			num_pages;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct vm_area_struct		*vma;
};

static void vb2_dma_sg_put(void *buf_priv);

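/*
 * Allocate the backing pages for an MMAP buffer. Start with the highest
 * page order that still fits the remaining size and fall back to lower
 * orders (down to single pages) when higher-order allocations fail, so
 * the page list stays as physically contiguous as memory pressure allows.
 * On failure, everything allocated so far is freed again.
 */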
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

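/*
 * Allocate an MMAP buffer: fill the page array via the compacting
 * allocator above, build an sg_table over it and arm the refcount
 * handler so the buffer is released when its last mapping goes away.
 */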
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
	struct vb2_dma_sg_buf *buf;
	int ret;
	int num_pages;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = 0;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, 0, size, gfp_flags);
	if (ret)
		goto fail_table_alloc;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}

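/*
 * Drop a reference to an MMAP buffer; once the last reference is gone,
 * release the kernel mapping, the sg_table, the pages and the
 * bookkeeping structures.
 */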
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(&buf->sg_table);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		kfree(buf);
	}
}

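/* true for I/O and raw-PFN mappings whose pages must not be refcounted */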
static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

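/*
 * Pin a USERPTR buffer. For VM_IO/VM_PFNMAP vmas the pages are resolved
 * with follow_pfn() and are not reference-counted; for ordinary memory
 * get_user_pages() takes a reference on every page. An sg_table is then
 * built over the pinned pages, honouring the sub-page offset.
 */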
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(buf->vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	return buf;

userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(&buf->sg_table);
	while (--i >= 0) {
		if (buf->write)
			set_page_dirty_lock(buf->pages[i]);
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

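/*
 * Return a kernel virtual address for the buffer, creating the mapping
 * with vm_map_ram() on first use. The stored offset is added back so a
 * USERPTR buffer that is not page-aligned maps to the correct byte.
 */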
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

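/*
 * Map the buffer into userspace by inserting its pages one by one into
 * the vma; vb2_common_vm_ops then ties the buffer refcount to the
 * lifetime of the mapping.
 */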
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

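/*
 * The allocator cookie is the sg_table itself; drivers typically fetch it
 * through the vb2_dma_sg_plane_desc() helper and perform the actual DMA
 * mapping themselves.
 */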
static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return &buf->sg_table;
}

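/*
 * A driver opts into this allocator by pointing its vb2_queue at these
 * ops before initializing the queue. A minimal sketch (the queue "q" and
 * its other fields are assumed to be set up by the driver elsewhere):
 *
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	ret = vb2_queue_init(q);
 */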
const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");