/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_conf {
	struct device		*dev;
};

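/*
 * Per-buffer state: the array of allocated (or pinned) pages, the
 * sg_table built over them, and the bookkeeping needed to map, mmap and
 * reference-count the buffer.
 */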
struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	int				offset;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	size_t				size;
	unsigned int			num_pages;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct vm_area_struct		*vma;
};

static void vb2_dma_sg_put(void *buf_priv);

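/*
 * Allocate the buffer as individual pages: try the highest order that
 * still fits the remaining size, fall back to lower orders (down to
 * order 0) when allocation fails, and split compound pages so that
 * buf->pages ends up as an array of order-0 pages.  On failure all
 * pages allocated so far are freed and -ENOMEM is returned.
 */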
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

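/*
 * MMAP buffer allocation: allocate the pages, build an sg_table that
 * describes them and take a reference on the device for the lifetime
 * of the buffer.  The returned pointer is the per-buffer private handle.
 */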
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
			      enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	int ret;
	int num_pages;

	if (WARN_ON(alloc_ctx == NULL))
		return NULL;
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(conf->dev);
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}

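/*
 * Release an MMAP buffer: when the last reference is dropped, tear down
 * the kernel mapping (if any), free the sg_table and the pages, and put
 * the device reference taken at allocation time.
 */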
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(&buf->sg_table);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

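/* True for VM_IO/VM_PFNMAP vmas, i.e. mappings without struct page backing. */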
static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

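/*
 * USERPTR buffer: pin the user pages backing [vaddr, vaddr + size).
 * For VM_IO/VM_PFNMAP vmas the pfns are resolved with follow_pfn(),
 * otherwise get_user_pages() takes a reference on every page; an
 * sg_table is then built over the resulting page array.
 */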
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     buf->dma_dir == DMA_FROM_DEVICE,
					     1, /* force */
					     buf->pages,
					     NULL);

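	/* Either path above must have produced exactly num_pages pages. */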
	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	return buf;

userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(&buf->sg_table);
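	/* Pages the device may have written to must be marked dirty. */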
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

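/*
 * Kernel virtual address of the buffer: created lazily with vm_map_ram()
 * on first use and torn down when the buffer is released.
 */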
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

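/*
 * mmap() support: insert the buffer's pages into the user vma one by one
 * and attach the common vm_ops so the refcount follows the mapping.
 */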
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);


	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

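/* The cookie handed to drivers is the sg_table describing the buffer. */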
static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return &buf->sg_table;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

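/*
 * The allocation context simply records the struct device that buffers
 * allocated from it will hold a reference on.
 */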
void *vb2_dma_sg_init_ctx(struct device *dev)
{
	struct vb2_dma_sg_conf *conf;

	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);

void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
{
	if (!IS_ERR_OR_NULL(alloc_ctx))
		kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");