From f09174c501f8bb259788cc36d5a7aa5b2831fb5e Mon Sep 17 00:00:00 2001
From: Qiaowei Ren
Date: Sat, 14 Dec 2013 14:25:02 +0800
Subject: [PATCH] x86: add user_atomic_cmpxchg_inatomic at uaccess.h

This patch adds user_atomic_cmpxchg_inatomic() to use the CMPXCHG
instruction against a user space address.

This generalizes the already existing futex_atomic_cmpxchg_inatomic()
so it can be used in other contexts.  This will be used in the upcoming
support for Intel MPX (Memory Protection Extensions).

[ hpa: replaced #ifdef inside a macro with IS_ENABLED() ]

Signed-off-by: Qiaowei Ren
Link: http://lkml.kernel.org/r/1387002303-6620-1-git-send-email-qiaowei.ren@intel.com
Signed-off-by: H. Peter Anvin
Cc: Peter Zijlstra
---
 arch/x86/include/asm/uaccess.h | 92 ++++++++++++++++++++++++++++++++++
 1 file changed, 92 insertions(+)

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 8ec57c07b125..48ff83855268 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -525,6 +525,98 @@ extern __must_check long strnlen_user(const char __user *str, long n);
 unsigned long __must_check clear_user(void __user *mem, unsigned long len);
 unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
 
+extern void __cmpxchg_wrong_size(void)
+	__compiletime_error("Bad argument size for cmpxchg");
+
+#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
+({ \
+	int __ret = 0; \
+	__typeof__(ptr) __uval = (uval); \
+	__typeof__(*(ptr)) __old = (old); \
+	__typeof__(*(ptr)) __new = (new); \
+	switch (size) { \
+	case 1: \
+	{ \
+		asm volatile("\t" ASM_STAC "\n" \
+			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
+			"2:\t" ASM_CLAC "\n" \
+			"\t.section .fixup, \"ax\"\n" \
+			"3:\tmov %3, %0\n" \
+			"\tjmp 2b\n" \
+			"\t.previous\n" \
+			_ASM_EXTABLE(1b, 3b) \
+			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+			: "i" (-EFAULT), "q" (__new), "1" (__old) \
+			: "memory" \
+		); \
+		break; \
+	} \
+	case 2: \
+	{ \
+		asm volatile("\t" ASM_STAC "\n" \
+			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
+			"2:\t" ASM_CLAC "\n" \
+			"\t.section .fixup, \"ax\"\n" \
+			"3:\tmov %3, %0\n" \
+			"\tjmp 2b\n" \
+			"\t.previous\n" \
+			_ASM_EXTABLE(1b, 3b) \
+			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+			: "i" (-EFAULT), "r" (__new), "1" (__old) \
+			: "memory" \
+		); \
+		break; \
+	} \
+	case 4: \
+	{ \
+		asm volatile("\t" ASM_STAC "\n" \
+			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
+			"2:\t" ASM_CLAC "\n" \
+			"\t.section .fixup, \"ax\"\n" \
+			"3:\tmov %3, %0\n" \
+			"\tjmp 2b\n" \
+			"\t.previous\n" \
+			_ASM_EXTABLE(1b, 3b) \
+			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+			: "i" (-EFAULT), "r" (__new), "1" (__old) \
+			: "memory" \
+		); \
+		break; \
+	} \
+	case 8: \
+	{ \
+		if (!IS_ENABLED(CONFIG_X86_64)) \
+			__cmpxchg_wrong_size(); \
+		\
+		asm volatile("\t" ASM_STAC "\n" \
+			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
+			"2:\t" ASM_CLAC "\n" \
+			"\t.section .fixup, \"ax\"\n" \
+			"3:\tmov %3, %0\n" \
+			"\tjmp 2b\n" \
+			"\t.previous\n" \
+			_ASM_EXTABLE(1b, 3b) \
+			: "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+			: "i" (-EFAULT), "r" (__new), "1" (__old) \
+			: "memory" \
+		); \
+		break; \
+	} \
+	default: \
+		__cmpxchg_wrong_size(); \
+	} \
+	*__uval = __old; \
+	__ret; \
+})
+
+#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
+({ \
+	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
+		__user_atomic_cmpxchg_inatomic((uval), (ptr), \
+				(old), (new), sizeof(*(ptr))) : \
+		-EFAULT; \
+})
+
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
-- 
GitLab
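
For readers skimming the patch, here is a minimal caller sketch (not part of
the patch itself).  The helper name try_cmpxchg_user_u32() is hypothetical,
and the pagefault_disable()/pagefault_enable() bracketing is an assumption
modeled on how futex_atomic_cmpxchg_inatomic() is typically called; what the
patch itself guarantees is that the macro returns 0 on success or -EFAULT when
the user address cannot be accessed, and stores the value read from user space
through its first argument.

/* Illustrative sketch only -- hypothetical helper, not part of this patch. */
static int try_cmpxchg_user_u32(u32 __user *uaddr, u32 expected, u32 newval)
{
	u32 cur;
	int ret;

	pagefault_disable();	/* "inatomic": must not sleep on a fault */
	ret = user_atomic_cmpxchg_inatomic(&cur, uaddr, expected, newval);
	pagefault_enable();

	if (ret)
		return ret;		/* -EFAULT: user address not accessible */

	return cur == expected ? 0 : -EAGAIN;	/* value changed under us */
}

If the fault cannot be handled inatomically, a real caller would typically
drop locks, fault the page in (e.g. via get_user_pages() or a plain access),
and retry, which is the same pattern the futex code uses.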