Commit 9a6fb28a authored by Tony Luck, committed by Thomas Gleixner

x86/mce: Improve memcpy_mcsafe()

Use the mcsafe_key defined in the previous patch to make decisions on which
copy function to use. We can't use the FEATURE bit any more because PCI
quirks run too late to affect the patching of code. So we use a static key.
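
For readers unfamiliar with the mechanism: a static key starts at its compile-time default and can be flipped at runtime, re-patching every branch site, which is exactly what alternatives-based FEATURE patching cannot do once boot-time patching has finished. A minimal sketch of the pattern, using the mcsafe_key from the previous patch; the quirk_enable_mcsafe() helper name is invented for illustration:

/* Sketch only: quirk_enable_mcsafe() is a hypothetical helper name. */
DEFINE_STATIC_KEY_FALSE(mcsafe_key);

static void quirk_enable_mcsafe(void)
{
	/*
	 * Safe to call this late (e.g. from a PCI quirk): the jump-label
	 * code patches the branch sites at enable time.
	 */
	static_branch_enable(&mcsafe_key);
}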

Turn memcpy_mcsafe() into an inline function to make life easier for
callers. The assembly code that actually does the copy is now named
memcpy_mcsafe_unrolled().
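
As a hedged caller-side illustration (the function below is invented for the example, not part of this patch), callers no longer need to open-code the recovery check that the pmem.h hunk below removes:

/* Hypothetical caller: the recovery check now lives inside memcpy_mcsafe(). */
static int copy_from_persistent(void *dst, const void *src, size_t n)
{
	return memcpy_mcsafe(dst, src, n);	/* 0 on success, -EFAULT on poison */
}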
Signed-off-by: Tony Luck <tony.luck@intel.com>
Acked-by: Borislav Petkov <bp@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Boris Petkov <bp@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/bfde2fc774e94f53d91b70a4321c85a0d33e7118.1472754712.git.tony.luck@intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent 3637efb0
arch/x86/include/asm/pmem.h
@@ -46,10 +46,7 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
 static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
 {
-	if (static_cpu_has(X86_FEATURE_MCE_RECOVERY))
-		return memcpy_mcsafe(dst, src, n);
-	memcpy(dst, src, n);
-	return 0;
+	return memcpy_mcsafe(dst, src, n);
 }
 
 /**
arch/x86/include/asm/string_64.h
@@ -79,6 +79,7 @@ int strcmp(const char *cs, const char *ct);
 #define memset(s, c, n) __memset(s, c, n)
 #endif
 
+__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
 DECLARE_STATIC_KEY_FALSE(mcsafe_key);
 
 /**
@@ -89,10 +90,23 @@ DECLARE_STATIC_KEY_FALSE(mcsafe_key);
  * @cnt:	number of bytes to copy
  *
  * Low level memory copy function that catches machine checks
+ * We only call into the "safe" function on systems that can
+ * actually do machine check recovery. Everyone else can just
+ * use memcpy().
  *
  * Return 0 for success, -EFAULT for fail
  */
-int memcpy_mcsafe(void *dst, const void *src, size_t cnt);
+static __always_inline __must_check int
+memcpy_mcsafe(void *dst, const void *src, size_t cnt)
+{
+#ifdef CONFIG_X86_MCE
+	if (static_branch_unlikely(&mcsafe_key))
+		return memcpy_mcsafe_unrolled(dst, src, cnt);
+	else
+#endif
+		memcpy(dst, src, cnt);
+	return 0;
+}
 
 #endif /* __KERNEL__ */
arch/x86/kernel/x8664_ksyms_64.c
@@ -38,7 +38,7 @@ EXPORT_SYMBOL(__copy_user_nocache);
 EXPORT_SYMBOL(_copy_from_user);
 EXPORT_SYMBOL(_copy_to_user);
 
-EXPORT_SYMBOL_GPL(memcpy_mcsafe);
+EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled);
 
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
arch/x86/lib/memcpy_64.S
@@ -181,11 +181,11 @@ ENDPROC(memcpy_orig)
 
 #ifndef CONFIG_UML
 /*
- * memcpy_mcsafe - memory copy with machine check exception handling
+ * memcpy_mcsafe_unrolled - memory copy with machine check exception handling
  * Note that we only catch machine checks when reading the source addresses.
  * Writes to target are posted and don't generate machine checks.
  */
-ENTRY(memcpy_mcsafe)
+ENTRY(memcpy_mcsafe_unrolled)
 	cmpl $8, %edx
 	/* Less than 8 bytes? Go to byte copy loop */
 	jb .L_no_whole_words
@@ -273,7 +273,7 @@ ENTRY(memcpy_mcsafe)
 .L_done_memcpy_trap:
 	xorq %rax, %rax
 	ret
-ENDPROC(memcpy_mcsafe)
+ENDPROC(memcpy_mcsafe_unrolled)
 
 .section .fixup, "ax"
 	/* Return -EFAULT for any failure */
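
The -EFAULT path works through the exception-table machinery: each source read that may consume poison is paired with a fixup target, and a machine check on that read resumes at the fixup instead of panicking. A rough sketch of the pattern, assuming the _ASM_EXTABLE_FAULT annotation used for such reads; the labels are invented for illustration:

/* Illustrative only: labels are hypothetical, not from memcpy_64.S. */
.L_sketch_read:
	movq (%rsi), %r8		/* source read that may machine-check */

.section .fixup, "ax"
.L_sketch_fail:
	movl $-EFAULT, %eax		/* report failure to the caller */
	ret
.previous

	_ASM_EXTABLE_FAULT(.L_sketch_read, .L_sketch_fail)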