Commit bc40d73c authored by Nick Piggin, committed by Linus Torvalds

splice: use get_user_pages_fast

Use get_user_pages_fast in splice.  This reverts some of the mmap_sem
batching done there; however, the biggest problem with mmap_sem tends to be
hold times blocking out other threads rather than cacheline bouncing.
Further: on architectures that implement get_user_pages_fast without locks,
mmap_sem can be avoided completely anyway (a sketch of the new calling
convention appears below, just before the diff).
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Dave Kleikamp <shaggy@austin.ibm.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Zach Brown <zach.brown@oracle.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Reviewed-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent f5dd33c4
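For context, a minimal sketch of the calling convention this patch switches to. The helper name pin_user_buffer and its error handling are illustrative only (not part of the patch), and the signature shown is the 2008-era one, in which the third argument is a write flag rather than a gup_flags mask:

#include <linux/errno.h>
#include <linux/mm.h>

/*
 * Illustrative only: pin the pages backing a read-only user buffer
 * without the caller taking mmap_sem.  get_user_pages_fast() returns
 * the number of pages pinned (possibly fewer than requested) or a
 * negative errno; every pinned page must eventually be released with
 * put_page().
 */
static int pin_user_buffer(unsigned long uaddr, int nr_pages,
			   struct page **pages)
{
	int pinned;

	pinned = get_user_pages_fast(uaddr, nr_pages, 0 /* write flag */,
				     pages);
	if (pinned <= 0)
		return pinned < 0 ? pinned : -EFAULT;

	return pinned;
}

This mirrors the replacement in get_iovec_page_array() below, where the old get_user_pages(current, current->mm, ...) call made under down_read(&current->mm->mmap_sem) becomes a single lockless call.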
fs/splice.c
@@ -1160,36 +1160,6 @@ static long do_splice(struct file *in, loff_t __user *off_in,
 	return -EINVAL;
 }
 
-/*
- * Do a copy-from-user while holding the mmap_semaphore for reading, in a
- * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem
- * for writing) and page faulting on the user memory pointed to by src.
- * This assumes that we will very rarely hit the partial != 0 path, or this
- * will not be a win.
- */
-static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
-{
-	int partial;
-
-	if (!access_ok(VERIFY_READ, src, n))
-		return -EFAULT;
-
-	pagefault_disable();
-	partial = __copy_from_user_inatomic(dst, src, n);
-	pagefault_enable();
-
-	/*
-	 * Didn't copy everything, drop the mmap_sem and do a faulting copy
-	 */
-	if (unlikely(partial)) {
-		up_read(&current->mm->mmap_sem);
-		partial = copy_from_user(dst, src, n);
-		down_read(&current->mm->mmap_sem);
-	}
-
-	return partial;
-}
-
 /*
  * Map an iov into an array of pages and offset/length tupples. With the
  * partial_page structure, we can map several non-contiguous ranges into
@@ -1203,8 +1173,6 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 {
 	int buffers = 0, error = 0;
 
-	down_read(&current->mm->mmap_sem);
-
 	while (nr_vecs) {
 		unsigned long off, npages;
 		struct iovec entry;
@@ -1213,7 +1181,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		int i;
 
 		error = -EFAULT;
-		if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry)))
+		if (copy_from_user(&entry, iov, sizeof(entry)))
 			break;
 
 		base = entry.iov_base;
@@ -1247,9 +1215,8 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		if (npages > PIPE_BUFFERS - buffers)
 			npages = PIPE_BUFFERS - buffers;
 
-		error = get_user_pages(current, current->mm,
-				       (unsigned long) base, npages, 0, 0,
-				       &pages[buffers], NULL);
+		error = get_user_pages_fast((unsigned long)base, npages,
+					0, &pages[buffers]);
 
 		if (unlikely(error <= 0))
 			break;
@@ -1288,8 +1255,6 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		iov++;
 	}
 
-	up_read(&current->mm->mmap_sem);
-
 	if (buffers)
 		return buffers;
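Note that pages pinned by get_user_pages_fast() carry a reference each, just as with get_user_pages(), so they must still be dropped once the pipe is done with them. A minimal sketch of that release step (variable names illustrative):

	int i;

	/* Drop the references taken by get_user_pages_fast(). */
	for (i = 0; i < pinned; i++)
		put_page(pages[i]);

In splice itself these references are dropped through the pipe buffer release path rather than an open-coded loop, so the patch does not need to touch the release side.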