Commit f1800536 authored by Ingo Molnar

x86, mm: dont use non-temporal stores in pagecache accesses

Impact: standardize IO on cached ops

On modern CPUs it is almost always a bad idea to use non-temporal stores,
as the regression fixed by this earlier commit has shown:

  30d697fa: x86: fix performance regression in write() syscall

The kernel simply has no good information about whether using non-temporal
stores is a good idea or not - and trying to add heuristics only increases
complexity and introduces fragility.

The regression on cached write()s took a very long time to find - over two
years. So don't take any chances and let the hardware decide how it makes
use of its caches.
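
For background, a minimal user-space sketch (not part of this commit; the
copy_cached()/copy_nocache() helpers are purely illustrative) contrasting an
ordinary cached copy with a copy done via SSE2 non-temporal stores, the kind
of store this change stops using on the pagecache path:

/*
 * Illustrative only: the same copy with cached stores vs. SSE2
 * non-temporal (streaming) stores.  The streaming variant writes
 * around the cache, which only pays off when the destination is not
 * read again soon - exactly the information the kernel lacks for
 * generic pagecache writes.
 */
#include <emmintrin.h>	/* SSE2: _mm_stream_si128(), _mm_sfence() */
#include <stddef.h>
#include <string.h>

/* Ordinary copy: the destination lines end up in the CPU cache. */
static void copy_cached(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
}

/*
 * Non-temporal copy: streaming stores bypass the cache.
 * dst and src must be 16-byte aligned, len a multiple of 16.
 */
static void copy_nocache(void *dst, const void *src, size_t len)
{
	__m128i *d = dst;
	const __m128i *s = src;
	size_t i;

	for (i = 0; i < len / 16; i++)
		_mm_stream_si128(&d[i], _mm_load_si128(&s[i]));
	_mm_sfence();	/* order the streaming stores before later stores */
}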

The only exception is drivers/gpu/drm/i915/i915_gem.c: there we are
absolutely sure that another entity (the GPU) will pick up the dirty
data immediately and that the CPU will not touch that data before the
GPU does.

Also, keep the _nocache() primitives to make it easier for people to
experiment with these details. There may be more clear-cut cases where
non-cached copies can be used, outside of filemap.c.
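
As a rough illustration of the i915-style exception, here is a simplified
sketch (the gtt_write_user_data() name and its error handling are
hypothetical; the io_mapping and _nocache calls mirror the i915 hunk below)
of pushing user data straight into a write-combined GPU mapping that the CPU
never reads back:

/*
 * Illustrative sketch only: the pattern where a non-temporal user copy
 * still makes sense.  The destination is a write-combined GPU aperture
 * mapping that the CPU will never read back, so there is no point
 * pulling the data into the CPU cache.  Modeled on i915's
 * fast_user_write(); the function name and error handling here are
 * hypothetical simplifications.
 */
#include <linux/io-mapping.h>
#include <linux/uaccess.h>

static int gtt_write_user_data(struct io_mapping *mapping,
			       unsigned long page_base, int page_offset,
			       char __user *user_data, int length)
{
	char *vaddr;
	unsigned long unwritten;

	/* Temporarily map the GPU aperture page write-combined. */
	vaddr = io_mapping_map_atomic_wc(mapping, page_base);
	/* Copy from user space without polluting the CPU cache. */
	unwritten = __copy_from_user_inatomic_nocache(vaddr + page_offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);

	return unwritten ? -EFAULT : 0;
}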

Cc: Salman Qazi <sqazi@google.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 34754b69
...
@@ -157,7 +157,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 }
 
 static __always_inline unsigned long __copy_from_user_nocache(void *to,
-		const void __user *from, unsigned long n, unsigned long total)
+		const void __user *from, unsigned long n)
 {
 	might_fault();
 	if (__builtin_constant_p(n)) {
@@ -180,7 +180,7 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
 
 static __always_inline unsigned long
 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
-				  unsigned long n, unsigned long total)
+				  unsigned long n)
 {
 	return __copy_from_user_ll_nocache_nozero(to, from, n);
 }
...
@@ -188,29 +188,18 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 extern long __copy_user_nocache(void *dst, const void __user *src,
 				unsigned size, int zerorest);
 
-static inline int __copy_from_user_nocache(void *dst, const void __user *src,
-				   unsigned size, unsigned long total)
+static inline int
+__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
 {
 	might_sleep();
-	/*
-	 * In practice this limit means that large file write()s
-	 * which get chunked to 4K copies get handled via
-	 * non-temporal stores here. Smaller writes get handled
-	 * via regular __copy_from_user():
-	 */
-	if (likely(total >= PAGE_SIZE))
-		return __copy_user_nocache(dst, src, size, 1);
-	else
-		return __copy_from_user(dst, src, size);
+	return __copy_user_nocache(dst, src, size, 1);
 }
 
-static inline int __copy_from_user_inatomic_nocache(void *dst,
-	    const void __user *src, unsigned size, unsigned total)
+static inline int
+__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+				  unsigned size)
 {
-	if (likely(total >= PAGE_SIZE))
-		return __copy_user_nocache(dst, src, size, 0);
-	else
-		return __copy_from_user_inatomic(dst, src, size);
+	return __copy_user_nocache(dst, src, size, 0);
 }
 
 unsigned long
...
@@ -215,7 +215,7 @@ fast_user_write(struct io_mapping *mapping,
 
 	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
 	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
-						      user_data, length, length);
+						      user_data, length);
 	io_mapping_unmap_atomic(vaddr_atomic);
 	if (unwritten)
 		return -EFAULT;
...
@@ -41,13 +41,13 @@ static inline void pagefault_enable(void)
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
-		const void __user *from, unsigned long n, unsigned long total)
+		const void __user *from, unsigned long n)
 {
 	return __copy_from_user_inatomic(to, from, n);
 }
 
 static inline unsigned long __copy_from_user_nocache(void *to,
-		const void __user *from, unsigned long n, unsigned long total)
+		const void __user *from, unsigned long n)
 {
 	return __copy_from_user(to, from, n);
 }
...
@@ -1816,14 +1816,14 @@ EXPORT_SYMBOL(file_remove_suid);
 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
 			const struct iovec *iov, size_t base, size_t bytes)
 {
-	size_t copied = 0, left = 0, total = bytes;
+	size_t copied = 0, left = 0;
 
 	while (bytes) {
 		char __user *buf = iov->iov_base + base;
 		int copy = min(bytes, iov->iov_len - base);
 
 		base = 0;
-		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy, total);
+		left = __copy_from_user_inatomic(vaddr, buf, copy);
 		copied += copy;
 		bytes -= copy;
 		vaddr += copy;
@@ -1851,9 +1851,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 	if (likely(i->nr_segs == 1)) {
 		int left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
-
-		left = __copy_from_user_inatomic_nocache(kaddr + offset,
-							buf, bytes, bytes);
+		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
 		copied = bytes - left;
 	} else {
 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1881,8 +1879,7 @@ size_t iov_iter_copy_from_user(struct page *page,
 	if (likely(i->nr_segs == 1)) {
 		int left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
-
-		left = __copy_from_user_nocache(kaddr + offset, buf, bytes, bytes);
+		left = __copy_from_user(kaddr + offset, buf, bytes);
 		copied = bytes - left;
 	} else {
 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
...
@@ -354,7 +354,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
 			break;
 
 		copied = bytes -
-			__copy_from_user_nocache(xip_mem + offset, buf, bytes, bytes);
+			__copy_from_user_nocache(xip_mem + offset, buf, bytes);
 
 		if (likely(copied > 0)) {
 			status = copied;
...