/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
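
/*
 * Illustrative userspace sketch (not part of this file's build): using a
 * pipe as the in-kernel buffer described above to shuttle file data to a
 * socket without bouncing it through user memory. It assumes a libc
 * wrapper (or a syscall(2) shim) for splice(2); 'file_fd' and 'sock_fd'
 * are placeholders, and error/short-write handling is omitted.
 *
 *	int pfd[2];
 *	loff_t off = 0;
 *	ssize_t n;
 *
 *	pipe(pfd);
 *	while ((n = splice(file_fd, &off, pfd[1], NULL, 65536,
 *			   SPLICE_F_MOVE | SPLICE_F_MORE)) > 0)
 *		splice(pfd[0], NULL, sock_fd, NULL, n,
 *		       SPLICE_F_MOVE | SPLICE_F_MORE);
 */
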
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>

struct partial_page {
	unsigned int offset;
	unsigned int len;
};

/*
 * Passed to splice_to_pipe
 */
struct splice_pipe_desc {
	struct page **pages;		/* page map */
	struct partial_page *partial;	/* pages[] may not be contig */
	int nr_pages;			/* number of pages in map */
	unsigned int flags;		/* splice flags */
	struct pipe_buf_operations *ops;/* ops associated with output pipe */
};
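
/*
 * Illustrative sketch of how a producer typically drives the two
 * structures above (this mirrors __generic_file_splice_read() further
 * down; 'some_page', 'this_len' and 'flags' are stand-ins for whatever
 * the caller has collected):
 *
 *	struct page *pages[PIPE_BUFFERS];
 *	struct partial_page partial[PIPE_BUFFERS];
 *	struct splice_pipe_desc spd = {
 *		.pages = pages,
 *		.partial = partial,
 *		.flags = flags,
 *		.ops = &page_cache_pipe_buf_ops,
 *	};
 *
 *	pages[0] = some_page;
 *	partial[0].offset = 0;
 *	partial[0].len = this_len;
 *	spd.nr_pages = 1;
 *	return splice_to_pipe(pipe, &spd);
 */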

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	lock_page(page);

	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate won't wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page)) {
		unlock_page(page);
		return 1;
	}

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->page = NULL;
	buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
}

static void *page_cache_pipe_buf_map(struct file *file,
				     struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, fall through to mapping.
		 */
		unlock_page(page);
	}

	return kmap(page);
error:
	unlock_page(page);
	return ERR_PTR(err);
}

static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
				      struct pipe_buffer *buf)
{
	kunmap(buf->page);
}

static void *user_page_pipe_buf_map(struct file *file,
				    struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	return kmap(buf->page);
}

static void user_page_pipe_buf_unmap(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	kunmap(buf->page);
}

static void page_cache_pipe_buf_get(struct pipe_inode_info *info,
				    struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}

static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = page_cache_pipe_buf_get,
};

static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	return 1;
}

static struct pipe_buf_operations user_page_pipe_buf_ops = {
	.can_merge = 0,
	.map = user_page_pipe_buf_map,
	.unmap = user_page_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = page_cache_pipe_buf_get,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
			      struct splice_pipe_desc *spd)
{
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->ops = spd->ops;
			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->inode)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	while (page_nr < spd->nr_pages)
		page_cache_release(spd->pages[page_nr++]);

	return ret;
}

static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	size_t total_len;
	int error;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
	};

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * Initiate read-ahead on this page range. However, don't call into
	 * read-ahead if this is a non-zero offset (we are likely doing small
	 * chunk splice and the page is already there) for a single page.
	 */
	if (!loff || spd.nr_pages > 1)
		do_page_cache_readahead(mapping, in, index, spd.nr_pages);

	/*
	 * Now fill in the holes:
	 */
	error = 0;
	total_len = 0;
	for (spd.nr_pages = 0; spd.nr_pages < nr_pages; spd.nr_pages++, index++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
find_page:
		/*
		 * lookup the page for this index
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * page didn't exist, allocate one
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						mapping_gfp_mask(mapping));
			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}

			goto readpage;
		}

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			/*
			 * If in nonblock mode then don't block on waiting
			 * for an in-flight io page
			 */
			if (flags & SPLICE_F_NONBLOCK)
				break;

			lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * added
			 */
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

readpage:
			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);

			if (unlikely(error)) {
				page_cache_release(page);
				if (error == AOP_TRUNCATED_PAGE)
					goto find_page;
				break;
			}

			/*
			 * i_size must be checked after ->readpage().
			 */
			isize = i_size_read(mapping->host);
			end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
			if (unlikely(!isize || index > end_index)) {
				page_cache_release(page);
				break;
			}

			/*
			 * if this is the last page, see if we need to shrink
			 * the length and stop
			 */
			if (end_index == index) {
				loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
				if (total_len + loff > isize) {
					page_cache_release(page);
					break;
				}
				/*
				 * force quit after adding this page
				 */
				nr_pages = spd.nr_pages;
				this_len = min(this_len, loff);
				loff = 0;
			}
		}
fill_it:
		pages[spd.nr_pages] = page;
		partial[spd.nr_pages].offset = loff;
		partial[spd.nr_pages].len = this_len;
		len -= this_len;
		total_len += this_len;
		loff = 0;
	}

	if (spd.nr_pages)
		return splice_to_pipe(pipe, &spd);

	return error;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	ssize_t spliced;
	int ret;

	ret = 0;
	spliced = 0;

	while (len) {
		ret = __generic_file_splice_read(in, ppos, pipe, len, flags);

		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}

		*ppos += ret;
		len -= ret;
		spliced += ret;
	}

	if (spliced)
		return spliced;

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_read);

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;
	ssize_t ret;
	void *ptr;
	int more;

	/*
	 * Sub-optimal, but we are limited by the pipe ->map. We don't
	 * need a kmap'ed buffer here, we just want to make sure we
	 * have the page pinned if the pipe page originates from the
	 * page cache.
	 */
	ptr = buf->ops->map(file, info, buf);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

	ret = file->f_op->sendpage(file, buf->page, buf->offset, sd->len,
				   &pos, more);

	buf->ops->unmap(info, buf);
	return ret;
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option that
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	unsigned int offset, this_len;
	struct page *page;
	pgoff_t index;
	char *src;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	src = buf->ops->map(file, info, buf);
	if (IS_ERR(src))
		return PTR_ERR(src);

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

	/*
	 * Reuse buf page, if SPLICE_F_MOVE is set.
	 */
	if (sd->flags & SPLICE_F_MOVE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the vm
		 * side (LRU and page cache) and we can reuse it. The page
		 * will also be locked on successful return.
		 */
		if (buf->ops->steal(info, buf))
			goto find_page;

		page = buf->page;
		if (add_to_page_cache(page, mapping, index, gfp_mask))
			goto find_page;

		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
find_page:
		page = find_lock_page(mapping, index);
		if (!page) {
			ret = -ENOMEM;
			page = page_cache_alloc_cold(mapping);
			if (unlikely(!page))
				goto out_nomem;

			/*
			 * This will also lock the page
			 */
			ret = add_to_page_cache_lru(page, mapping, index,
						    gfp_mask);
			if (unlikely(ret))
				goto out;
		}

		/*
		 * We get here with the page locked. If the page is also
		 * uptodate, we don't need to do more. If it isn't, we
		 * may need to bring it in if we are not going to overwrite
		 * the full page.
		 */
		if (!PageUptodate(page)) {
			if (this_len < PAGE_CACHE_SIZE) {
				ret = mapping->a_ops->readpage(file, page);
				if (unlikely(ret))
					goto out;

				lock_page(page);

				if (!PageUptodate(page)) {
					/*
					 * Page got invalidated, repeat.
					 */
					if (!page->mapping) {
						unlock_page(page);
						page_cache_release(page);
						goto find_page;
					}
					ret = -EIO;
					goto out;
				}
			} else
				SetPageUptodate(page);
		}
	}

	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		char *dst = kmap_atomic(page, KM_USER0);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER0);
	}

	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	/*
	 * Return the number of bytes written.
	 */
	ret = this_len;
	mark_page_accessed(page);
	balance_dirty_pages_ratelimited(mapping);
out:
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN))
		page_cache_release(page);

	unlock_page(page);
out_nomem:
	buf->ops->unmap(info, buf);
	return ret;
}

/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = *ppos;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err <= 0) {
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += err;
			buf->offset += err;
			buf->len -= err;

			sd.len -= err;
			sd.pos += err;
			sd.total_len -= err;
			if (sd.len)
				continue;

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			if (!sd.total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	return ret;
}
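
/*
 * Illustrative sketch (not part of this file) of the shape of a
 * splice_actor fed to splice_from_pipe(): it consumes up to sd->len
 * bytes from 'buf' and returns how many bytes it handled, or a negative
 * error. pipe_to_file() and pipe_to_sendpage() above are the real
 * examples; the name below is made up.
 *
 *	static int pipe_to_null(struct pipe_inode_info *pipe,
 *				struct pipe_buffer *buf,
 *				struct splice_desc *sd)
 *	{
 *		return sd->len;	// claim the whole chunk, a bit-bucket sink
 *	}
 *
 *	splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
 */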

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	ssize_t ret;

	ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
	if (ret > 0) {
		struct inode *inode = mapping->host;

		*ppos += ret;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			int err;

			mutex_lock(&inode->i_mutex);
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);
			mutex_unlock(&inode->i_mutex);

			if (err)
				ret = err;
		}
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	int ret;

	if (unlikely(!out->f_op || !out->f_op->splice_write))
		return -EINVAL;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, ppos, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	loff_t isize, left;
	int ret;

	if (unlikely(!in->f_op || !in->f_op->splice_read))
		return -EINVAL;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	return in->f_op->splice_read(in, ppos, pipe, len, flags);
}

long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	loff_t out_off;
	umode_t i_mode;
	int i;

	/*
	 * We require the input being a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;
	out_off = 0;

	while (len) {
		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
		max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

		ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
		if (unlikely(ret < 0))
			goto out_release;

		read_len = ret;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = do_splice_from(pipe, out, &out_off, read_len,
				     flags & ~SPLICE_F_NONBLOCK);
		if (unlikely(ret < 0))
			goto out_release;

		bytes += ret;
		len -= ret;

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
			break;
	}

	pipe->nrbufs = pipe->curbuf = 0;

	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}

EXPORT_SYMBOL(do_splice_direct);
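
/*
 * Illustrative sketch (not part of this file) of an in-kernel caller
 * using do_splice_direct() to copy a byte range between two already
 * opened struct files via the per-task internal pipe; 'in', 'out' and
 * 'count' are assumed to come from the caller.
 *
 *	loff_t pos = 0;
 *	long copied;
 *
 *	copied = do_splice_direct(in, &pos, out, count, 0);
 *	if (copied < 0)
 *		return copied;
 */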

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	loff_t offset, *off;
	long ret;

	pipe = in->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (out->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &out->f_pos;

		ret = do_splice_from(pipe, out, off, len, flags);

		if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	pipe = out->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (in->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &in->f_pos;

		ret = do_splice_to(in, off, pipe, len, flags);

		if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	return -EINVAL;
}

/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial)
{
	int buffers = 0, error = 0;

	/*
	 * It's ok to take the mmap_sem for reading, even
	 * across a "get_user()".
	 */
	down_read(&current->mm->mmap_sem);

	while (nr_vecs) {
		unsigned long off, npages;
		void __user *base;
		size_t len;
		int i;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		error = -EFAULT;
		if (unlikely(!base))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;
		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > PIPE_BUFFERS - buffers)
			npages = PIPE_BUFFERS - buffers;

		error = get_user_pages(current, current->mm,
				       (unsigned long) base, npages, 0, 0,
				       &pages[buffers], NULL);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE) - off;

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == PIPE_BUFFERS)
			break;

		nr_vecs--;
		iov++;
	}

	up_read(&current->mm->mmap_sem);

	if (buffers)
		return buffers;

	return error;
}

/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 *
 * Note that vmsplice only supports splicing _from_ user memory to a pipe,
 * not the other way around. Splicing from user memory is a simple operation
 * that can be supported without any funky alignment restrictions or nasty
 * vm tricks. We simply map in the user memory and fill them into a pipe.
 * The reverse isn't quite as easy, though. There are two possible solutions
 * for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (it
 *	  has restriction limitations on both ends of the pipe).
 *
 * Alas, it isn't here.
 *
 */
static long do_vmsplice(struct file *file, const struct iovec __user *iov,
			unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe = file->f_dentry->d_inode->i_pipe;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &user_page_pipe_buf_ops,
	};

	if (unlikely(!pipe))
		return -EBADF;
	if (unlikely(nr_segs > UIO_MAXIOV))
		return -EINVAL;
	else if (unlikely(!nr_segs))
		return 0;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial);
	if (spd.nr_pages <= 0)
		return spd.nr_pages;

	return splice_to_pipe(pipe, &spd);
}

asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct file *file;
	long error;
	int fput;

	error = -EBADF;
	file = fget_light(fd, &fput);
	if (file) {
		if (file->f_mode & FMODE_WRITE)
			error = do_vmsplice(file, iov, nr_segs, flags);

		fput_light(file, fput);
	}

	return error;
}
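
/*
 * Illustrative userspace sketch (not part of this file's build) of the
 * vmsplice(2) usage described above do_vmsplice(): map an iovec of user
 * memory into a pipe instead of write()ing it. Assumes a libc wrapper
 * (or a syscall(2) shim) for vmsplice(); error handling is omitted.
 *
 *	char buf[4096];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	vmsplice(pfd[1], &iov, 1, 0);
 *	// the pipe now references the pages backing 'buf'
 */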

asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
{
	long error;
	struct file *in, *out;
	int fput_in, fput_out;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fd_in, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
							  out, off_out,
							  len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}

/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret, do_wakeup, i, ipipe_first;

	ret = do_wakeup = ipipe_first = 0;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	if (ipipe->inode < opipe->inode) {
		ipipe_first = 1;
		mutex_lock(&ipipe->inode->i_mutex);
		mutex_lock(&opipe->inode->i_mutex);
	} else {
		mutex_lock(&opipe->inode->i_mutex);
		mutex_lock(&ipipe->inode->i_mutex);
	}

	for (i = 0;; i++) {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		if (ipipe->nrbufs - i) {
			ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));

			/*
			 * If we have room, fill this buffer
			 */
			if (opipe->nrbufs < PIPE_BUFFERS) {
				int nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

				/*
				 * Get a reference to this pipe buffer,
				 * so we can copy the contents over.
				 */
				ibuf->ops->get(ipipe, ibuf);

				obuf = opipe->bufs + nbuf;
				*obuf = *ibuf;

				if (obuf->len > len)
					obuf->len = len;

				opipe->nrbufs++;
				do_wakeup = 1;
				ret += obuf->len;
				len -= obuf->len;

				if (!len)
					break;
				if (opipe->nrbufs < PIPE_BUFFERS)
					continue;
			}

			/*
			 * We have input available, but no output room.
			 * If we already copied data, return that. If we
			 * need to drop the opipe lock, it must be ordered
			 * last to avoid deadlocks.
			 */
			if ((flags & SPLICE_F_NONBLOCK) || !ipipe_first) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			if (do_wakeup) {
				smp_mb();
				if (waitqueue_active(&opipe->wait))
					wake_up_interruptible(&opipe->wait);
				kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
				do_wakeup = 0;
			}

			opipe->waiting_writers++;
			pipe_wait(opipe);
			opipe->waiting_writers--;
			continue;
		}

		/*
		 * No input buffers, do the usual checks for available
		 * writers and blocking and wait if necessary
		 */
		if (!ipipe->writers)
			break;
		if (!ipipe->waiting_writers) {
			if (ret)
				break;
		}
		/*
		 * pipe_wait() drops the ipipe mutex. To avoid deadlocks
		 * with another process, we can only safely do that if
		 * the ipipe lock is ordered last.
		 */
		if ((flags & SPLICE_F_NONBLOCK) || ipipe_first) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (waitqueue_active(&ipipe->wait))
			wake_up_interruptible_sync(&ipipe->wait);
		kill_fasync(&ipipe->fasync_writers, SIGIO, POLL_OUT);

		pipe_wait(ipipe);
	}

	mutex_unlock(&ipipe->inode->i_mutex);
	mutex_unlock(&opipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&opipe->wait))
			wake_up_interruptible(&opipe->wait);
		kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
	}

	return ret;
}

/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = in->f_dentry->d_inode->i_pipe;
	struct pipe_inode_info *opipe = out->f_dentry->d_inode->i_pipe;

	/*
	 * Link ipipe to the two output pipes, consuming as we go along.
	 */
	if (ipipe && opipe)
		return link_pipe(ipipe, opipe, len, flags);

	return -EINVAL;
}

asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
{
	struct file *in;
	int error, fput_in;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fdin, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			int fput_out;
			struct file *out = fget_light(fdout, &fput_out);

			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_tee(in, out, len, flags);
				fput_light(out, fput_out);
			}
		}
		fput_light(in, fput_in);
	}

	return error;
}
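
/*
 * Illustrative userspace sketch (not part of this file's build): the
 * classic tee(2) pattern, duplicating stdin's pipe contents to stdout
 * while also splicing them to a log file. Assumes stdin and stdout are
 * pipes, a libc wrapper (or syscall(2) shim) for tee() and splice(),
 * and an open 'log_fd'; error handling beyond EAGAIN is omitted.
 *
 *	for (;;) {
 *		ssize_t n = tee(STDIN_FILENO, STDOUT_FILENO, INT_MAX,
 *				SPLICE_F_NONBLOCK);
 *		if (n < 0 && errno == EAGAIN)
 *			continue;
 *		if (n <= 0)
 *			break;
 *		splice(STDIN_FILENO, NULL, log_fd, NULL, n, SPLICE_F_MOVE);
 *	}
 */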