/*
2
 * helper functions for SG DMA video4linux capture buffers
 *
 * The functions expect the hardware being able to scatter-gather
 * (i.e. the buffers are not linear in physical memory, but fragmented
 * into PAGE_SIZE chunks).  They also assume the driver does not need
 * to touch the video data.
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
 * (c) 2006 Ted Walther and John Sokol
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#include <media/videobuf-dma-sg.h>

#define MAGIC_DMABUF 0x19721112
#define MAGIC_SG_MEM 0x17890714

#define MAGIC_CHECK(is,should)	if (unlikely((is) != (should))) \
	{ printk(KERN_ERR "magic mismatch: %x (expected %x)\n",is,should); BUG(); }

static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");

#define dprintk(level, fmt, arg...)	if (debug >= level) \
	printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)

/* --------------------------------------------------------------------- */

struct scatterlist*
videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
	if (NULL == sglist)
		return NULL;
J
Jens Axboe 已提交
64
	sg_init_table(sglist, nr_pages);
65 66 67 68 69
	for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
		pg = vmalloc_to_page(virt);
		if (NULL == pg)
			goto err;
		BUG_ON(PageHighMem(pg));
70
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89
	}
	return sglist;

 err:
	kfree(sglist);
	return NULL;
}

struct scatterlist*
videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
{
	struct scatterlist *sglist;
	int i = 0;

	if (NULL == pages[0])
		return NULL;
	sglist = kcalloc(nr_pages, sizeof(*sglist), GFP_KERNEL);
	if (NULL == sglist)
		return NULL;
J
Jens Axboe 已提交
90
	sg_init_table(sglist, nr_pages);
91 92 93 94 95 96

	if (NULL == pages[0])
		goto nopage;
	if (PageHighMem(pages[0]))
		/* DMA to highmem pages might not work */
		goto highmem;
97
	sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset);
98 99 100 101 102
	for (i = 1; i < nr_pages; i++) {
		if (NULL == pages[i])
			goto nopage;
		if (PageHighMem(pages[i]))
			goto highmem;
103
		sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0);
104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121
	}
	return sglist;

 nopage:
	dprintk(2,"sgl: oops - no page\n");
	kfree(sglist);
	return NULL;

 highmem:
	dprintk(2,"sgl: oops - highmem page\n");
	kfree(sglist);
	return NULL;
}

/* --------------------------------------------------------------------- */

struct videobuf_dmabuf *videobuf_to_dma (struct videobuf_buffer *buf)
{
122 123
	struct videobuf_dma_sg_memory *mem = buf->priv;
	BUG_ON(!mem);
124

125
	MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
126 127 128 129 130 131 132 133 134 135

	return &mem->dma;
}

/* Zero a dmabuf descriptor and stamp it with the dmabuf magic. */
void videobuf_dma_init(struct videobuf_dmabuf *dma)
{
	memset(dma, 0, sizeof(*dma));
	dma->magic = MAGIC_DMABUF;
}

static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
			int direction, unsigned long data, unsigned long size)
138 139 140 141 142 143
{
	unsigned long first,last;
	int err, rw = 0;

	dma->direction = direction;
	switch (dma->direction) {
144 145 146 147 148 149 150 151
	case DMA_FROM_DEVICE:
		rw = READ;
		break;
	case DMA_TO_DEVICE:
		rw = WRITE;
		break;
	default:
		BUG();
152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168
	}

	first = (data          & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
	dma->offset   = data & ~PAGE_MASK;
	dma->nr_pages = last-first+1;
	dma->pages = kmalloc(dma->nr_pages * sizeof(struct page*),
			     GFP_KERNEL);
	if (NULL == dma->pages)
		return -ENOMEM;
	dprintk(1,"init user [0x%lx+0x%lx => %d pages]\n",
		data,size,dma->nr_pages);

	err = get_user_pages(current,current->mm,
			     data & PAGE_MASK, dma->nr_pages,
			     rw == READ, 1, /* force */
			     dma->pages, NULL);
169

170 171 172 173 174 175 176 177
	if (err != dma->nr_pages) {
		dma->nr_pages = (err >= 0) ? err : 0;
		dprintk(1,"get_user_pages: err=%d [%d]\n",err,dma->nr_pages);
		return err < 0 ? err : -EINVAL;
	}
	return 0;
}

/*
 * Pin a userspace range for DMA, taking current->mm->mmap_sem for
 * read around the locked worker.  See videobuf_dma_init_user_locked()
 * for the actual pinning logic and error semantics.
 */
int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
			   unsigned long data, unsigned long size)
{
	int ret;
	down_read(&current->mm->mmap_sem);
	ret = videobuf_dma_init_user_locked(dma, direction, data, size);
	up_read(&current->mm->mmap_sem);

	return ret;
}

/*
 * Back a dmabuf with nr_pages of zeroed kernel memory obtained from
 * vmalloc_32() (guaranteed 32-bit DMA-addressable) -- used as a
 * bounce buffer when no userspace address is supplied.
 *
 * Returns 0 on success or -ENOMEM.
 */
int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
			     int nr_pages)
{
	dprintk(1,"init kernel [%d pages]\n",nr_pages);
	dma->direction = direction;
	dma->vmalloc = vmalloc_32(nr_pages << PAGE_SHIFT);
	if (NULL == dma->vmalloc) {
		dprintk(1,"vmalloc_32(%d pages) failed\n",nr_pages);
		return -ENOMEM;
	}
	dprintk(1,"vmalloc is at addr 0x%08lx, size=%d\n",
				(unsigned long)dma->vmalloc,
				nr_pages << PAGE_SHIFT);
	memset(dma->vmalloc,0,nr_pages << PAGE_SHIFT);
	dma->nr_pages = nr_pages;
	return 0;
}

/*
 * Describe a contiguous overlay framebuffer region by raw bus
 * address; nothing is allocated or pinned.  A zero address is
 * rejected with -EINVAL (note: direction is recorded even on that
 * failure path, matching historical behavior).
 */
int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
			      dma_addr_t addr, int nr_pages)
{
	dprintk(1,"init overlay [%d pages @ bus 0x%lx]\n",
		nr_pages,(unsigned long)addr);
	dma->direction = direction;
	if (0 == addr)
		return -EINVAL;

	dma->bus_addr = addr;
	dma->nr_pages = nr_pages;
	return 0;
}

int videobuf_dma_map(struct videobuf_queue* q, struct videobuf_dmabuf *dma)
222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247
{
	MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
	BUG_ON(0 == dma->nr_pages);

	if (dma->pages) {
		dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
						   dma->offset);
	}
	if (dma->vmalloc) {
		dma->sglist = videobuf_vmalloc_to_sg
						(dma->vmalloc,dma->nr_pages);
	}
	if (dma->bus_addr) {
		dma->sglist = kmalloc(sizeof(struct scatterlist), GFP_KERNEL);
		if (NULL != dma->sglist) {
			dma->sglen  = 1;
			sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK;
			dma->sglist[0].offset           = dma->bus_addr & ~PAGE_MASK;
			sg_dma_len(&dma->sglist[0])     = dma->nr_pages * PAGE_SIZE;
		}
	}
	if (NULL == dma->sglist) {
		dprintk(1,"scatterlist is NULL\n");
		return -ENOMEM;
	}
	if (!dma->bus_addr) {
248
		dma->sglen = dma_map_sg(q->dev, dma->sglist,
249 250 251
					dma->nr_pages, dma->direction);
		if (0 == dma->sglen) {
			printk(KERN_WARNING
252
			       "%s: videobuf_map_sg failed\n",__func__);
253 254 255 256 257 258 259 260 261
			kfree(dma->sglist);
			dma->sglist = NULL;
			dma->sglen = 0;
			return -EIO;
		}
	}
	return 0;
}

int videobuf_dma_sync(struct videobuf_queue *q, struct videobuf_dmabuf *dma)
263
{
264
	MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
265 266
	BUG_ON(!dma->sglen);

267
	dma_sync_sg_for_cpu(q->dev, dma->sglist, dma->nr_pages, dma->direction);
268 269 270 271 272
	return 0;
}

int videobuf_dma_unmap(struct videobuf_queue* q,struct videobuf_dmabuf *dma)
{
273
	MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
274 275 276
	if (!dma->sglen)
		return 0;

277
	dma_unmap_sg(q->dev, dma->sglist, dma->nr_pages, dma->direction);
278

279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303
	kfree(dma->sglist);
	dma->sglist = NULL;
	dma->sglen = 0;
	return 0;
}

int videobuf_dma_free(struct videobuf_dmabuf *dma)
{
	MAGIC_CHECK(dma->magic,MAGIC_DMABUF);
	BUG_ON(dma->sglen);

	if (dma->pages) {
		int i;
		for (i=0; i < dma->nr_pages; i++)
			page_cache_release(dma->pages[i]);
		kfree(dma->pages);
		dma->pages = NULL;
	}

	vfree(dma->vmalloc);
	dma->vmalloc = NULL;

	if (dma->bus_addr) {
		dma->bus_addr = 0;
	}
304
	dma->direction = DMA_NONE;
305 306 307 308 309
	return 0;
}

/* --------------------------------------------------------------------- */

int videobuf_sg_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
311 312 313
{
	struct videobuf_queue q;

314
	q.dev = dev;
315

316
	return videobuf_dma_map(&q, dma);
317 318
}

int videobuf_sg_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
320 321 322
{
	struct videobuf_queue q;

323
	q.dev = dev;
324

325
	return videobuf_dma_unmap(&q, dma);
326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344
}

/* --------------------------------------------------------------------- */

/* vm_ops->open: a new vma references this mapping; bump its refcount. */
static void
videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dprintk(2,"vm_open %p [count=%d,vma=%08lx-%08lx]\n",map,
		map->count,vma->vm_start,vma->vm_end);
	map->count++;
}

static void
videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
345
	struct videobuf_dma_sg_memory *mem;
346 347 348 349 350 351 352 353
	int i;

	dprintk(2,"vm_close %p [count=%d,vma=%08lx-%08lx]\n",map,
		map->count,vma->vm_start,vma->vm_end);

	map->count--;
	if (0 == map->count) {
		dprintk(1,"munmap %p q=%p\n",map,q);
354
		mutex_lock(&q->vb_lock);
355 356 357 358 359 360 361 362 363 364
		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;
			mem=q->bufs[i]->priv;

			if (!mem)
				continue;

			MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);

365
			if (q->bufs[i]->map != map)
366
				continue;
367
			q->bufs[i]->map   = NULL;
368 369 370
			q->bufs[i]->baddr = 0;
			q->ops->buf_release(q,q->bufs[i]);
		}
371
		mutex_unlock(&q->vb_lock);
372 373 374 375 376 377 378 379 380 381 382
		kfree(map);
	}
	return;
}

/*
 * Get an anonymous page for the mapping.  Make sure we can DMA to that
 * memory location with 32bit PCI devices (i.e. don't use highmem for
 * now ...).  Bounce buffers don't work very well for the data rates
 * video capture has.
 */
static int
videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
385 386 387
{
	struct page *page;

388 389
	dprintk(3,"fault: fault @ %08lx [vma %08lx-%08lx]\n",
		(unsigned long)vmf->virtual_address,vma->vm_start,vma->vm_end);
390 391
	page = alloc_page(GFP_USER | __GFP_DMA32);
	if (!page)
392
		return VM_FAULT_OOM;
A
Andrew Morton 已提交
393 394
	clear_user_page(page_address(page), (unsigned long)vmf->virtual_address,
			page);
395 396
	vmf->page = page;
	return 0;
397 398 399 400 401 402
}

static struct vm_operations_struct videobuf_vm_ops =
{
	.open     = videobuf_vm_open,
	.close    = videobuf_vm_close,
403
	.fault    = videobuf_vm_fault,
404 405 406
};

/* ---------------------------------------------------------------------
 * SG handlers for the generic methods
 */

/* Allocated area consists of 3 parts:
	struct video_buffer
	struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
	struct videobuf_dma_sg_memory
 */

/*
 * Allocate a videobuf_buffer, the driver's private struct, and the
 * videobuf_dma_sg_memory state in a single kzalloc (mem sits at
 * offset 'size' behind the buffer).
 *
 * Fix: check the kzalloc() result before dereferencing it -- the
 * original crashed on allocation failure.
 */
static void *__videobuf_alloc(size_t size)
{
	struct videobuf_dma_sg_memory *mem;
	struct videobuf_buffer *vb;

	vb = kzalloc(size+sizeof(*mem),GFP_KERNEL);
	if (!vb)
		return vb;

	mem = vb->priv = ((char *)vb)+size;
	mem->magic=MAGIC_SG_MEM;

	videobuf_dma_init(&mem->dma);

	dprintk(1,"%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
		__func__,vb,(long)sizeof(*vb),(long)size-sizeof(*vb),
		mem,(long)sizeof(*mem));

	return vb;
}

/* Generic-ops hook: expose the dmabuf's vmalloc bounce area (NULL when
 * the buffer is user- or overlay-backed). */
static void *__videobuf_to_vmalloc (struct videobuf_buffer *buf)
{
	struct videobuf_dma_sg_memory *mem = buf->priv;
	BUG_ON(!mem);

	MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);

	return mem->dma.vmalloc;
}

static int __videobuf_iolock (struct videobuf_queue* q,
			      struct videobuf_buffer *vb,
			      struct v4l2_framebuffer *fbuf)
{
	int err,pages;
	dma_addr_t bus;
451
	struct videobuf_dma_sg_memory *mem = vb->priv;
452 453
	BUG_ON(!mem);

454
	MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
455 456 457 458 459 460 461 462

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
	case V4L2_MEMORY_USERPTR:
		if (0 == vb->baddr) {
			/* no userspace addr -- kernel bounce buffer */
			pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
			err = videobuf_dma_init_kernel( &mem->dma,
463
							DMA_FROM_DEVICE,
464 465 466
							pages );
			if (0 != err)
				return err;
467
		} else if (vb->memory == V4L2_MEMORY_USERPTR) {
468 469
			/* dma directly to userspace */
			err = videobuf_dma_init_user( &mem->dma,
470
						      DMA_FROM_DEVICE,
471 472 473
						      vb->baddr,vb->bsize );
			if (0 != err)
				return err;
474 475 476 477 478 479 480
		} else {
			/* NOTE: HACK: videobuf_iolock on V4L2_MEMORY_MMAP
			buffers can only be called from videobuf_qbuf
			we take current->mm->mmap_sem there, to prevent
			locking inversion, so don't take it here */

			err = videobuf_dma_init_user_locked(&mem->dma,
481
						      DMA_FROM_DEVICE,
482 483 484
						      vb->baddr, vb->bsize);
			if (0 != err)
				return err;
485 486 487 488 489 490 491 492 493 494 495 496 497
		}
		break;
	case V4L2_MEMORY_OVERLAY:
		if (NULL == fbuf)
			return -EINVAL;
		/* FIXME: need sanity checks for vb->boff */
		/*
		 * Using a double cast to avoid compiler warnings when
		 * building for PAE. Compiler doesn't like direct casting
		 * of a 32 bit ptr to 64 bit integer.
		 */
		bus   = (dma_addr_t)(unsigned long)fbuf->base + vb->boff;
		pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
498
		err = videobuf_dma_init_overlay(&mem->dma, DMA_FROM_DEVICE,
499 500 501 502 503 504 505
						bus, pages);
		if (0 != err)
			return err;
		break;
	default:
		BUG();
	}
506
	err = videobuf_dma_map(q, &mem->dma);
507 508 509 510 511 512 513 514 515
	if (0 != err)
		return err;

	return 0;
}

static int __videobuf_sync(struct videobuf_queue *q,
			   struct videobuf_buffer *buf)
{
516 517
	struct videobuf_dma_sg_memory *mem = buf->priv;
	BUG_ON(!mem);
518 519 520 521 522 523 524 525 526 527 528
	MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);

	return	videobuf_dma_sync(q,&mem->dma);
}

static int __videobuf_mmap_free(struct videobuf_queue *q)
{
	int i;

	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (q->bufs[i]) {
529
			if (q->bufs[i]->map)
530 531 532 533 534 535 536 537 538 539
				return -EBUSY;
		}
	}

	return 0;
}

static int __videobuf_mmap_mapper(struct videobuf_queue *q,
			 struct vm_area_struct *vma)
{
540
	struct videobuf_dma_sg_memory *mem;
541 542 543 544 545 546 547 548 549 550 551 552 553 554
	struct videobuf_mapping *map;
	unsigned int first,last,size,i;
	int retval;

	retval = -EINVAL;
	if (!(vma->vm_flags & VM_WRITE)) {
		dprintk(1,"mmap app bug: PROT_WRITE please\n");
		goto done;
	}
	if (!(vma->vm_flags & VM_SHARED)) {
		dprintk(1,"mmap app bug: MAP_SHARED please\n");
		goto done;
	}

555 556 557 558 559 560 561 562
	/* This function maintains backwards compatibility with V4L1 and will
	 * map more than one buffer if the vma length is equal to the combined
	 * size of multiple buffers than it will map them together.  See
	 * VIDIOCGMBUF in the v4l spec
	 *
	 * TODO: Allow drivers to specify if they support this mode
	 */

563 564 565 566 567
	/* look for first buffer to map */
	for (first = 0; first < VIDEO_MAX_FRAME; first++) {
		if (NULL == q->bufs[first])
			continue;
		mem=q->bufs[first]->priv;
568
		BUG_ON(!mem);
569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587
		MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);

		if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
			continue;
		if (q->bufs[first]->boff == (vma->vm_pgoff << PAGE_SHIFT))
			break;
	}
	if (VIDEO_MAX_FRAME == first) {
		dprintk(1,"mmap app bug: offset invalid [offset=0x%lx]\n",
			(vma->vm_pgoff << PAGE_SHIFT));
		goto done;
	}

	/* look for last buffer to map */
	for (size = 0, last = first; last < VIDEO_MAX_FRAME; last++) {
		if (NULL == q->bufs[last])
			continue;
		if (V4L2_MEMORY_MMAP != q->bufs[last]->memory)
			continue;
588
		if (q->bufs[last]->map) {
589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606
			retval = -EBUSY;
			goto done;
		}
		size += q->bufs[last]->bsize;
		if (size == (vma->vm_end - vma->vm_start))
			break;
	}
	if (VIDEO_MAX_FRAME == last) {
		dprintk(1,"mmap app bug: size invalid [size=0x%lx]\n",
			(vma->vm_end - vma->vm_start));
		goto done;
	}

	/* create mapping + update buffer list */
	retval = -ENOMEM;
	map = kmalloc(sizeof(struct videobuf_mapping),GFP_KERNEL);
	if (NULL == map)
		goto done;
607 608 609 610 611

	size = 0;
	for (i = first; i <= last; i++) {
		if (NULL == q->bufs[i])
			continue;
612
		q->bufs[i]->map   = map;
613
		q->bufs[i]->baddr = vma->vm_start + size;
614
		size += q->bufs[i]->bsize;
615
	}
616

617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636
	map->count    = 1;
	map->start    = vma->vm_start;
	map->end      = vma->vm_end;
	map->q        = q;
	vma->vm_ops   = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
	vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
	vma->vm_private_data = map;
	dprintk(1,"mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
		map,q,vma->vm_start,vma->vm_end,vma->vm_pgoff,first,last);
	retval = 0;

 done:
	return retval;
}

/*
 * Copy up to @count bytes from the current read buffer's bounce area
 * to userspace, clipped to what is left past q->read_off.  Returns
 * the number of bytes copied or -EFAULT.
 */
static int __videobuf_copy_to_user ( struct videobuf_queue *q,
				char __user *data, size_t count,
				int nonblocking )
{
	struct videobuf_dma_sg_memory *mem = q->read_buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);

	/* copy to userspace */
	if (count > q->read_buf->size - q->read_off)
		count = q->read_buf->size - q->read_off;

	if (copy_to_user(data, mem->dma.vmalloc+q->read_off, count))
		return -EFAULT;

	return count;
}

/*
 * Streaming read helper: optionally patch the frame counter into the
 * tail of a VBI block (legacy compatibility hack), then hand off to
 * the common copy-to-user path.
 */
static int __videobuf_copy_stream ( struct videobuf_queue *q,
				char __user *data, size_t count, size_t pos,
				int vbihack, int nonblocking )
{
	unsigned int  *fc;
	struct videobuf_dma_sg_memory *mem = q->read_buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic,MAGIC_SG_MEM);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
			* within the last four bytes of each vbi data block.
			* We need that one to maintain backward compatibility
			* to all vbi decoding software out there ... */
		fc  = (unsigned int*)mem->dma.vmalloc;
		fc += (q->read_buf->size>>2) -1;
		*fc = q->read_buf->field_count >> 1;
		dprintk(1,"vbihack: %d\n",*fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user (q,data,count,nonblocking);

	if ( (count==-EFAULT) && (0 == pos) )
		return -EFAULT;

	return count;
}

static struct videobuf_qtype_ops sg_ops = {
681 682 683 684 685 686 687
	.magic        = MAGIC_QTYPE_OPS,

	.alloc        = __videobuf_alloc,
	.iolock       = __videobuf_iolock,
	.sync         = __videobuf_sync,
	.mmap_free    = __videobuf_mmap_free,
	.mmap_mapper  = __videobuf_mmap_mapper,
688
	.video_copy_to_user = __videobuf_copy_to_user,
689
	.copy_stream  = __videobuf_copy_stream,
690
	.vmalloc      = __videobuf_to_vmalloc,
691 692
};

/*
 * Allocate a buffer through the generic core, steering it to the SG
 * allocator via a temporary queue that only carries int_ops/msize.
 */
void *videobuf_sg_alloc(size_t size)
{
	struct videobuf_queue q;

	/* Required to make generic handler to call __videobuf_alloc */
	q.int_ops = &sg_ops;
	q.msize = size;

	return videobuf_alloc(&q);
}

void videobuf_queue_sg_init(struct videobuf_queue* q,
706
			 struct videobuf_queue_ops *ops,
707
			 struct device *dev,
708 709 710 711 712 713
			 spinlock_t *irqlock,
			 enum v4l2_buf_type type,
			 enum v4l2_field field,
			 unsigned int msize,
			 void *priv)
{
714
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
715
				 priv, &sg_ops);
716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731
}

/* --------------------------------------------------------------------- */

EXPORT_SYMBOL_GPL(videobuf_vmalloc_to_sg);

EXPORT_SYMBOL_GPL(videobuf_to_dma);
EXPORT_SYMBOL_GPL(videobuf_dma_init);
EXPORT_SYMBOL_GPL(videobuf_dma_init_user);
EXPORT_SYMBOL_GPL(videobuf_dma_init_kernel);
EXPORT_SYMBOL_GPL(videobuf_dma_init_overlay);
EXPORT_SYMBOL_GPL(videobuf_dma_map);
EXPORT_SYMBOL_GPL(videobuf_dma_sync);
EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
EXPORT_SYMBOL_GPL(videobuf_dma_free);

EXPORT_SYMBOL_GPL(videobuf_sg_dma_map);
EXPORT_SYMBOL_GPL(videobuf_sg_dma_unmap);
EXPORT_SYMBOL_GPL(videobuf_sg_alloc);

EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);

/*
 * Local variables:
 * c-basic-offset: 8
 * End:
 */