/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
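/*
 * Rough userspace sketch (illustrative only, not part of this file):
 * moving data from a file to a socket through a pipe, without copying
 * it through user memory. 'file_fd', 'sock_fd' and 'len' are assumed
 * to be set up by the caller:
 *
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	splice(file_fd, NULL, pfd[1], NULL, len, SPLICE_F_MOVE);
 *	splice(pfd[0], NULL, sock_fd, NULL, len, SPLICE_F_MOVE);
 */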
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>

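/*
 * A chunk of a single page: offset/len describe the part of the
 * corresponding pages[] entry that actually holds data.
 */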
struct partial_page {
	unsigned int offset;
	unsigned int len;
};

/*
 * Passed to splice_to_pipe
 */
struct splice_pipe_desc {
	struct page **pages;		/* page map */
	struct partial_page *partial;	/* pages[] may not be contig */
	int nr_pages;			/* number of pages in map */
	unsigned int flags;		/* splice flags */
	const struct pipe_buf_operations *ops;/* ops associated with output pipe */
};

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping;

	lock_page(page);

	mapping = page_mapping(page);
	if (mapping) {
		WARN_ON(!PageUptodate(page));

		/*
		 * At least for ext2 with nobh option, we need to wait on
		 * writeback completing on this page, since we'll remove it
		 * from the pagecache.  Otherwise truncate won't wait on the
		 * page, allowing the disk blocks to be reused by someone else
		 * before we've actually written our data to them; fs corruption
		 * ensues.
		 */
		wait_on_page_writeback(page);

		if (PagePrivate(page))
			try_to_release_page(page, GFP_KERNEL);

		/*
		 * If we succeeded in removing the mapping, set LRU flag
		 * and return good.
		 */
		if (remove_mapping(mapping, page)) {
			buf->flags |= PIPE_BUF_FLAG_LRU;
			return 0;
		}
	}

	/*
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
	 */
	unlock_page(page);
	return 1;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
}

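/*
 * Make sure the contents of this pipe buffer are uptodate before use.
 * Returns 0 if so, -ENODATA if the page was truncated while we waited
 * for it, or -EIO on a read error.
 */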
static int page_cache_pipe_buf_pin(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, we are done.
		 */
		unlock_page(page);
	}

	return 0;
error:
	unlock_page(page);
	return err;
}

static const struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.pin = page_cache_pipe_buf_pin,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

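/*
 * Pages can only be stolen from the pipe if userspace gifted them to us
 * (SPLICE_F_GIFT); otherwise the steal fails and the data has to be copied.
 */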
static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);
}

static const struct pipe_buf_operations user_page_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.pin = generic_pipe_buf_pin,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
			      struct splice_pipe_desc *spd)
{
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->ops = spd->ops;
			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;

			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->inode)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	while (page_nr < spd->nr_pages)
		page_cache_release(spd->pages[page_nr++]);

	return ret;
}

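/*
 * Look up (and read in, if necessary) the pagecache pages backing 'in'
 * at *ppos and feed up to 'len' bytes of them into the pipe. Returns
 * the number of bytes spliced, or an error.
 */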
static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
	};

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * Don't try to 2nd guess the read-ahead logic, call into
	 * page_cache_readahead() like the page cache reads would do.
	 */
	page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);

	/*
	 * Now fill in the holes:
	 */
	error = 0;

	/*
	 * Lookup the (hopefully) full range of pages we need.
	 */
	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);

	/*
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * allocate the rest.
	 */
	index += spd.nr_pages;
	while (spd.nr_pages < nr_pages) {
		/*
		 * Page could be there, find_get_pages_contig() breaks on
		 * the first hole.
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * Make sure the read-ahead engine is notified
			 * about this failure.
			 */
			handle_ra_miss(mapping, &in->f_ra, index);

			/*
			 * page didn't exist, allocate one.
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
					      GFP_KERNEL);
			if (unlikely(error)) {
				page_cache_release(page);
				if (error == -EEXIST)
					continue;
				break;
			}
			/*
			 * add_to_page_cache() locks the page, unlock it
			 * to avoid convoluting the logic below even more.
			 */
			unlock_page(page);
		}

		pages[spd.nr_pages++] = page;
		index++;
	}

	/*
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	 */
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;
	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = pages[page_nr];

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			/*
			 * If in nonblock mode then don't block on waiting
			 * for an in-flight io page
			 */
			if (flags & SPLICE_F_NONBLOCK) {
				if (TestSetPageLocked(page))
					break;
			} else
				lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * added
			 */
			if (!page->mapping) {
				unlock_page(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				/*
				 * We really should re-lookup the page here,
				 * but it complicates things a lot. Instead
				 * let's just do what we already stored, and
				 * we'll get it the next time we are called.
				 */
				if (error == AOP_TRUNCATED_PAGE)
					error = 0;

				break;
			}
		}
fill_it:
		/*
		 * i_size must be checked after PageUptodate.
		 */
		isize = i_size_read(mapping->host);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		/*
		 * if this is the last page, see if we need to shrink
		 * the length and stop
		 */
		if (end_index == index) {
			unsigned int plen;

			/*
			 * max good bytes in this page
			 */
			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			/*
			 * force quit after adding this page
			 */
			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		partial[page_nr].offset = loff;
		partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	/*
	 * Release any pages at the end, if we quit early. 'page_nr' is how far
	 * we got, 'nr_pages' is how many pages are in the map.
	 */
	while (page_nr < nr_pages)
		page_cache_release(pages[page_nr++]);

	if (spd.nr_pages)
		return splice_to_pipe(pipe, &spd);

	return error;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	ssize_t spliced;
	int ret;
	loff_t isize, left;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	ret = 0;
	spliced = 0;
	while (len) {
		ret = __generic_file_splice_read(in, ppos, pipe, len, flags);

		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}

		*ppos += ret;
		len -= ret;
		spliced += ret;
	}

	if (spliced)
		return spliced;

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_read);

/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;
	int ret, more;

	ret = buf->ops->pin(pipe, buf);
	if (!ret) {
		more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

		ret = file->f_op->sendpage(file, buf->page, buf->offset,
					   sd->len, &pos, more);
	}

	return ret;
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	unsigned int offset, this_len;
	struct page *page;
	pgoff_t index;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	ret = buf->ops->pin(pipe, buf);
	if (unlikely(ret))
		return ret;

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

find_page:
	page = find_lock_page(mapping, index);
	if (!page) {
		ret = -ENOMEM;
		page = page_cache_alloc_cold(mapping);
		if (unlikely(!page))
			goto out_ret;

		/*
		 * This will also lock the page
		 */
		ret = add_to_page_cache_lru(page, mapping, index,
					    GFP_KERNEL);
		if (unlikely(ret))
			goto out;
	}

	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
	if (unlikely(ret)) {
		loff_t isize = i_size_read(mapping->host);

		if (ret != AOP_TRUNCATED_PAGE)
			unlock_page(page);
		page_cache_release(page);
		if (ret == AOP_TRUNCATED_PAGE)
			goto find_page;

		/*
		 * prepare_write() may have instantiated a few blocks
		 * outside i_size.  Trim these off again.
		 */
		if (sd->pos + this_len > isize)
			vmtruncate(mapping->host, isize);

		goto out_ret;
	}

	if (buf->page != page) {
		/*
		 * Careful, ->map() uses KM_USER0!
		 */
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page, KM_USER1);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER1);
		buf->ops->unmap(pipe, buf, src);
	}

	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
	if (ret) {
		if (ret == AOP_TRUNCATED_PAGE) {
			page_cache_release(page);
			goto find_page;
		}
		if (ret < 0)
			goto out;
		/*
		 * A partial write has happened, so 'ret' is already initialized
		 * to the number of bytes written; there is nothing we have to do here.
		 */
	} else
		ret = this_len;
	/*
	 * Return the number of bytes written and mark page as
	 * accessed, we are now done!
	 */
	mark_page_accessed(page);
out:
	page_cache_release(page);
	unlock_page(page);
out_ret:
	return ret;
}

/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
			   struct file *out, loff_t *ppos, size_t len,
			   unsigned int flags, splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = *ppos;

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			const struct pipe_buf_operations *ops = buf->ops;

			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err <= 0) {
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += err;
			buf->offset += err;
			buf->len -= err;

			sd.len -= err;
			sd.pos += err;
			sd.total_len -= err;
			if (sd.len)
				continue;

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			if (!sd.total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	return ret;
}
EXPORT_SYMBOL(__splice_from_pipe);

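/*
 * Locking wrapper around __splice_from_pipe(): grabs i_mutex on both the
 * output inode and the pipe inode in a consistent order (via
 * inode_double_lock()) so the actor can safely call into the filesystem.
 */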
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	ssize_t ret;
	struct inode *inode = out->f_mapping->host;

	/*
	 * The actor worker might be calling ->prepare_write and
	 * ->commit_write. Most of the time, these expect i_mutex to
	 * be held. Since this may result in an ABBA deadlock with
	 * pipe->inode, we have to order lock acquiry here.
	 */
	inode_double_lock(inode, pipe->inode);
	ret = __splice_from_pipe(pipe, out, ppos, len, flags, actor);
	inode_double_unlock(inode, pipe->inode);

	return ret;
}

/**
 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes
 * @pipe:	pipe info
 * @out:	file to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file. The caller is responsible
 * for acquiring i_mutex on both inodes.
 *
 */
ssize_t
generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	int err;

	err = remove_suid(out->f_path.dentry);
	if (unlikely(err))
		return err;

	ret = __splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
	if (ret > 0) {
		unsigned long nr_pages;

		*ppos += ret;
		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);

			if (err)
				ret = err;
		}
		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write_nolock);

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	int err;

	err = should_remove_suid(out->f_path.dentry);
	if (unlikely(err)) {
		mutex_lock(&inode->i_mutex);
		err = __remove_suid(out->f_path.dentry, err);
		mutex_unlock(&inode->i_mutex);
		if (err)
			return err;
	}

	ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
	if (ret > 0) {
		unsigned long nr_pages;

		*ppos += ret;
		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			mutex_lock(&inode->i_mutex);
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);
			mutex_unlock(&inode->i_mutex);

			if (err)
				ret = err;
		}
		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);

/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	int ret;

	if (unlikely(!out->f_op || !out->f_op->splice_write))
		return -EINVAL;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, ppos, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	int ret;

	if (unlikely(!in->f_op || !in->f_op->splice_read))
		return -EINVAL;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	return in->f_op->splice_read(in, ppos, pipe, len, flags);
}

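/*
 * Splice data from 'in' to 'out' without a user-visible pipe, using the
 * per-task internal pipe (current->splice_pipe) as the intermediate
 * buffer. At most PIPE_BUFFERS pages are moved per loop iteration.
 */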
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	loff_t out_off;
	umode_t i_mode;
	int i;

	/*
	 * We require the input to be a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_path.dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;
	out_off = 0;

	while (len) {
		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
		max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

		ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
		if (unlikely(ret < 0))
			goto out_release;

		read_len = ret;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = do_splice_from(pipe, out, &out_off, read_len,
				     flags & ~SPLICE_F_NONBLOCK);
		if (unlikely(ret < 0))
			goto out_release;

		bytes += ret;
		len -= ret;

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
			break;
	}

	pipe->nrbufs = pipe->curbuf = 0;

	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}

/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
static inline struct pipe_inode_info *pipe_info(struct inode *inode)
{
	if (S_ISFIFO(inode->i_mode))
		return inode->i_pipe;

	return NULL;
}

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	loff_t offset, *off;
	long ret;

	pipe = pipe_info(in->f_path.dentry->d_inode);
	if (pipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (out->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &out->f_pos;

		ret = do_splice_from(pipe, out, off, len, flags);

		if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	pipe = pipe_info(out->f_path.dentry->d_inode);
	if (pipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (in->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &in->f_pos;

		ret = do_splice_to(in, off, pipe, len, flags);

		if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	return -EINVAL;
}

/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
1147
				struct partial_page *partial, int aligned)
1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186
{
	int buffers = 0, error = 0;

	/*
	 * It's ok to take the mmap_sem for reading, even
	 * across a "get_user()".
	 */
	down_read(&current->mm->mmap_sem);

	while (nr_vecs) {
		unsigned long off, npages;
		void __user *base;
		size_t len;
		int i;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		error = -EFAULT;
		if (unlikely(!base))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;

		/*
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		 */
		error = -EINVAL;
		if (aligned && (off || len & ~PAGE_MASK))
			break;

		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > PIPE_BUFFERS - buffers)
			npages = PIPE_BUFFERS - buffers;

		error = get_user_pages(current, current->mm,
				       (unsigned long) base, npages, 0, 0,
				       &pages[buffers], NULL);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == PIPE_BUFFERS)
			break;

		nr_vecs--;
		iov++;
	}

	up_read(&current->mm->mmap_sem);

	if (buffers)
		return buffers;

	return error;
}

/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 *
 * Note that vmsplice only supports splicing _from_ user memory to a pipe,
 * not the other way around. Splicing from user memory is a simple operation
 * that can be supported without any funky alignment restrictions or nasty
 * vm tricks. We simply map in the user memory and fill them into a pipe.
 * The reverse isn't quite as easy, though. There are two possible solutions
 * for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (it
 *	  has restriction limitations on both ends of the pipe).
 *
 * Alas, it isn't here.
 *
 */
static long do_vmsplice(struct file *file, const struct iovec __user *iov,
			unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &user_page_pipe_buf_ops,
	};

	pipe = pipe_info(file->f_path.dentry->d_inode);
	if (!pipe)
		return -EBADF;
	if (unlikely(nr_segs > UIO_MAXIOV))
		return -EINVAL;
	else if (unlikely(!nr_segs))
		return 0;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial,
					    flags & SPLICE_F_GIFT);
	if (spd.nr_pages <= 0)
		return spd.nr_pages;

	return splice_to_pipe(pipe, &spd);
}

asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct file *file;
	long error;
	int fput;

	error = -EBADF;
	file = fget_light(fd, &fput);
	if (file) {
		if (file->f_mode & FMODE_WRITE)
			error = do_vmsplice(file, iov, nr_segs, flags);

		fput_light(file, fput);
	}

	return error;
}

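/*
 * splice(2) entry point: at least one of fd_in/fd_out must refer to a
 * pipe, do_splice() works out which direction the data flows.
 */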
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
{
	long error;
	struct file *in, *out;
	int fput_in, fput_out;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fd_in, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
							  out, off_out,
							  len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}

/*
 * Make sure there's data to read. Wait for input if we can, otherwise
 * return an appropriate error.
 */
static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	 */
	if (pipe->nrbufs)
		return 0;

	ret = 0;
	mutex_lock(&pipe->inode->i_mutex);

	while (!pipe->nrbufs) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		pipe_wait(pipe);
	}

	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}

/*
 * Make sure there's writeable room. Wait for room if we can, otherwise
 * return an appropriate error.
 */
static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	 */
	if (pipe->nrbufs < PIPE_BUFFERS)
		return 0;

	ret = 0;
	mutex_lock(&pipe->inode->i_mutex);

	while (pipe->nrbufs >= PIPE_BUFFERS) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			ret = -EPIPE;
			break;
		}
		if (flags & SPLICE_F_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}

/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, i = 0, nbuf;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	inode_double_lock(ipipe->inode, opipe->inode);

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		/*
		 * If we have iterated all input buffers or ran out of
		 * output room, break.
		 */
		if (i >= ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS)
			break;

		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
		nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

		/*
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		 */
		ibuf->ops->get(ipipe, ibuf);

		obuf = opipe->bufs + nbuf;
		*obuf = *ibuf;

		/*
		 * Don't inherit the gift flag, we need to
		 * prevent multiple steals of this page.
		 */
		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

		if (obuf->len > len)
			obuf->len = len;

		opipe->nrbufs++;
		ret += obuf->len;
		len -= obuf->len;
		i++;
	} while (len);

	inode_double_unlock(ipipe->inode, opipe->inode);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0) {
		smp_mb();
		if (waitqueue_active(&opipe->wait))
			wake_up_interruptible(&opipe->wait);
		kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
	}

	return ret;
}

/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode);
	struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode);
	int ret = -EINVAL;

	/*
	 * Duplicate the contents of ipipe to opipe without actually
	 * copying the data.
	 */
	if (ipipe && opipe && ipipe != opipe) {
		/*
		 * Keep going, unless we encounter an error. The ipipe/opipe
		 * ordering doesn't really matter.
		 */
		ret = link_ipipe_prep(ipipe, flags);
		if (!ret) {
			ret = link_opipe_prep(opipe, flags);
			if (!ret) {
				ret = link_pipe(ipipe, opipe, len, flags);
				if (!ret && (flags & SPLICE_F_NONBLOCK))
					ret = -EAGAIN;
			}
		}
	}

	return ret;
}

asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
{
	struct file *in;
	int error, fput_in;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fdin, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			int fput_out;
			struct file *out = fget_light(fdout, &fput_out);

			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_tee(in, out, len, flags);
				fput_light(out, fput_out);
			}
		}
 		fput_light(in, fput_in);
 	}

	return error;
}