Commit 63bcff2a authored by H. Peter Anvin

x86, smap: Add STAC and CLAC instructions to control user space access

When Supervisor Mode Access Prevention (SMAP) is enabled, access to
userspace from the kernel is controlled by the AC flag.  To make the
performance of manipulating that flag acceptable, there are two new
instructions, STAC and CLAC, to set and clear it.

This patch adds those instructions, via alternative(), when the SMAP
feature is enabled.  It also adds X86_EFLAGS_AC unconditionally to the
SYSCALL entry mask; there is simply no reason to make that one
conditional.
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Link: http://lkml.kernel.org/r/1348256595-29119-9-git-send-email-hpa@linux.intel.com
Parent: a052858f
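Before the diff itself, a minimal sketch (not part of the patch) of the usage pattern it introduces may help: every kernel path that deliberately touches user memory is bracketed with stac()/clac() (ASM_STAC/ASM_CLAC in assembly), and alternative() turns those markers into NOPs on CPUs without X86_FEATURE_SMAP. The function and copy-helper names below are illustrative placeholders, not symbols added by this commit.

/*
 * Illustrative sketch only -- not part of this commit.  It mirrors the
 * pattern applied throughout the diff below: raise EFLAGS.AC around the
 * user-space access, then drop it again.  On CPUs without
 * X86_FEATURE_SMAP, alternative() patches stac()/clac() into NOPs, so
 * the bracketing is essentially free.
 */
#include <asm/smap.h>		/* stac(), clac() */
#include <asm/uaccess.h>	/* access_ok(), VERIFY_READ */

static unsigned long sketch_copy_from_user(void *dst,
					   const void __user *src,
					   unsigned long n)
{
	if (!access_ok(VERIFY_READ, src, n))	/* range check still required */
		return n;			/* nothing copied */

	stac();				/* allow supervisor access to user pages */
	n = raw_copy_loop(dst, src, n);	/* placeholder for the real copy routine */
	clac();				/* forbid user access again */

	return n;			/* bytes NOT copied, as in the _ll helpers */
}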
@@ -14,6 +14,7 @@
 #include <asm/segment.h>
 #include <asm/irqflags.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 #include <linux/linkage.h>
 #include <linux/err.h>
@@ -146,8 +147,10 @@ ENTRY(ia32_sysenter_target)
 	SAVE_ARGS 0,1,0
 	/* no need to do an access_ok check here because rbp has been
 	   32bit zero extended */
+	ASM_STAC
 1:	movl	(%rbp),%ebp
 	_ASM_EXTABLE(1b,ia32_badarg)
+	ASM_CLAC
 	orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	CFI_REMEMBER_STATE
@@ -301,8 +304,10 @@ ENTRY(ia32_cstar_target)
 	/* no need to do an access_ok check here because r8 has been
 	   32bit zero extended */
 	/* hardware stack frame is complete now */
+	ASM_STAC
 1:	movl	(%r8),%r9d
 	_ASM_EXTABLE(1b,ia32_badarg)
+	ASM_CLAC
 	orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	CFI_REMEMBER_STATE
@@ -365,6 +370,7 @@ cstar_tracesys:
 END(ia32_cstar_target)
 ia32_badarg:
+	ASM_CLAC
 	movq $-EFAULT,%rax
 	jmp ia32_sysret
 	CFI_ENDPROC
...
@@ -126,8 +126,9 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 	/* See comment in fxsave() below. */
 #ifdef CONFIG_AS_FXSAVEQ
-	asm volatile("1: fxsaveq %[fx]\n\t"
-		     "2:\n"
+	asm volatile(ASM_STAC "\n"
+		     "1: fxsaveq %[fx]\n\t"
+		     "2: " ASM_CLAC "\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     " jmp 2b\n"
@@ -136,8 +137,9 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
		     : [err] "=r" (err), [fx] "=m" (*fx)
		     : "0" (0));
 #else
-	asm volatile("1: rex64/fxsave (%[fx])\n\t"
-		     "2:\n"
+	asm volatile(ASM_STAC "\n"
+		     "1: rex64/fxsave (%[fx])\n\t"
+		     "2: " ASM_CLAC "\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     " jmp 2b\n"
...
@@ -9,10 +9,13 @@
 #include <asm/asm.h>
 #include <asm/errno.h>
 #include <asm/processor.h>
+#include <asm/smap.h>
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
-	asm volatile("1:\t" insn "\n" \
-	"2:\t.section .fixup,\"ax\"\n" \
+	asm volatile("\t" ASM_STAC "\n" \
+	"1:\t" insn "\n" \
+	"2:\t" ASM_CLAC "\n" \
+	"\t.section .fixup,\"ax\"\n" \
 	"3:\tmov\t%3, %1\n" \
 	"\tjmp\t2b\n" \
 	"\t.previous\n" \
@@ -21,12 +24,14 @@
 	: "i" (-EFAULT), "0" (oparg), "1" (0))
 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
-	asm volatile("1:\tmovl %2, %0\n" \
+	asm volatile("\t" ASM_STAC "\n" \
+	"1:\tmovl %2, %0\n" \
 	"\tmovl\t%0, %3\n" \
 	"\t" insn "\n" \
 	"2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
 	"\tjnz\t1b\n" \
-	"3:\t.section .fixup,\"ax\"\n" \
+	"3:\t" ASM_CLAC "\n" \
+	"\t.section .fixup,\"ax\"\n" \
 	"4:\tmov\t%5, %1\n" \
 	"\tjmp\t3b\n" \
 	"\t.previous\n" \
@@ -122,8 +127,10 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
-	asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
-		     "2:\t.section .fixup, \"ax\"\n"
+	asm volatile("\t" ASM_STAC "\n"
+		     "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+		     "2:\t" ASM_CLAC "\n"
+		     "\t.section .fixup, \"ax\"\n"
		     "3:\tmov %3, %0\n"
		     "\tjmp 2b\n"
		     "\t.previous\n"
...
@@ -58,13 +58,13 @@
 #ifdef CONFIG_X86_SMAP
-static inline void clac(void)
+static __always_inline void clac(void)
 {
 	/* Note: a barrier is implicit in alternative() */
 	alternative(ASM_NOP3, __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
 }
-static inline void stac(void)
+static __always_inline void stac(void)
 {
 	/* Note: a barrier is implicit in alternative() */
 	alternative(ASM_NOP3, __stringify(__ASM_STAC), X86_FEATURE_SMAP);
...
@@ -9,6 +9,7 @@
 #include <linux/string.h>
 #include <asm/asm.h>
 #include <asm/page.h>
+#include <asm/smap.h>
 #define VERIFY_READ 0
 #define VERIFY_WRITE 1
@@ -192,9 +193,10 @@ extern int __get_user_bad(void);
 #ifdef CONFIG_X86_32
 #define __put_user_asm_u64(x, addr, err, errret) \
-	asm volatile("1: movl %%eax,0(%2)\n" \
+	asm volatile(ASM_STAC "\n" \
+		     "1: movl %%eax,0(%2)\n" \
		     "2: movl %%edx,4(%2)\n" \
-		     "3:\n" \
+		     "3: " ASM_CLAC "\n" \
		     ".section .fixup,\"ax\"\n" \
		     "4: movl %3,%0\n" \
		     " jmp 3b\n" \
@@ -205,9 +207,10 @@ extern int __get_user_bad(void);
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))
 #define __put_user_asm_ex_u64(x, addr) \
-	asm volatile("1: movl %%eax,0(%1)\n" \
+	asm volatile(ASM_STAC "\n" \
+		     "1: movl %%eax,0(%1)\n" \
		     "2: movl %%edx,4(%1)\n" \
-		     "3:\n" \
+		     "3: " ASM_CLAC "\n" \
		     _ASM_EXTABLE_EX(1b, 2b) \
		     _ASM_EXTABLE_EX(2b, 3b) \
		     : : "A" (x), "r" (addr))
@@ -379,8 +382,9 @@ do { \
 } while (0)
 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
-	asm volatile("1: mov"itype" %2,%"rtype"1\n" \
-		     "2:\n" \
+	asm volatile(ASM_STAC "\n" \
+		     "1: mov"itype" %2,%"rtype"1\n" \
+		     "2: " ASM_CLAC "\n" \
		     ".section .fixup,\"ax\"\n" \
		     "3: mov %3,%0\n" \
		     " xor"itype" %"rtype"1,%"rtype"1\n" \
@@ -412,8 +416,9 @@ do { \
 } while (0)
 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
-	asm volatile("1: mov"itype" %1,%"rtype"0\n" \
-		     "2:\n" \
+	asm volatile(ASM_STAC "\n" \
+		     "1: mov"itype" %1,%"rtype"0\n" \
+		     "2: " ASM_CLAC "\n" \
		     _ASM_EXTABLE_EX(1b, 2b) \
		     : ltype(x) : "m" (__m(addr)))
@@ -443,8 +448,9 @@ struct __large_struct { unsigned long buf[100]; };
  * aliasing issues.
  */
 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
-	asm volatile("1: mov"itype" %"rtype"1,%2\n" \
-		     "2:\n" \
+	asm volatile(ASM_STAC "\n" \
+		     "1: mov"itype" %"rtype"1,%2\n" \
+		     "2: " ASM_CLAC "\n" \
		     ".section .fixup,\"ax\"\n" \
		     "3: mov %3,%0\n" \
		     " jmp 2b\n" \
@@ -454,8 +460,9 @@ struct __large_struct { unsigned long buf[100]; };
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
-	asm volatile("1: mov"itype" %"rtype"0,%1\n" \
-		     "2:\n" \
+	asm volatile(ASM_STAC "\n" \
+		     "1: mov"itype" %"rtype"0,%1\n" \
+		     "2: " ASM_CLAC "\n" \
		     _ASM_EXTABLE_EX(1b, 2b) \
		     : : ltype(x), "m" (__m(addr)))
...
@@ -74,8 +74,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
 	if (unlikely(err))
 		return -EFAULT;
-	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
-			     "2:\n"
+	__asm__ __volatile__(ASM_STAC "\n"
+			     "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
+			     "2: " ASM_CLAC "\n"
			     ".section .fixup,\"ax\"\n"
			     "3: movl $-1,%[err]\n"
			     " jmp 2b\n"
@@ -97,8 +98,9 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	__asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
-			     "2:\n"
+	__asm__ __volatile__(ASM_STAC "\n"
+			     "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+			     "2: " ASM_CLAC "\n"
			     ".section .fixup,\"ax\"\n"
			     "3: movl $-1,%[err]\n"
			     " jmp 2b\n"
...
@@ -1113,7 +1113,8 @@ void syscall_init(void)
 	/* Flags to clear on syscall */
 	wrmsrl(MSR_SYSCALL_MASK,
-	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
+	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
+	       X86_EFLAGS_IOPL|X86_EFLAGS_AC);
 }
 unsigned long kernel_eflags;
...
@@ -56,6 +56,7 @@
 #include <asm/ftrace.h>
 #include <asm/percpu.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 #include <linux/err.h>
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
@@ -465,7 +466,8 @@ END(ret_from_fork)
  * System call entry. Up to 6 arguments in registers are supported.
  *
  * SYSCALL does not save anything on the stack and does not change the
- * stack pointer.
+ * stack pointer.  However, it does mask the flags register for us, so
+ * CLD and CLAC are not needed.
  */
 /*
@@ -884,6 +886,7 @@ END(interrupt)
  */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
+	ASM_CLAC
 	XCPT_FRAME
 	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
@@ -1023,6 +1026,7 @@ END(common_interrupt)
  */
 .macro apicinterrupt num sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	pushq_cfi $~(\num)
 .Lcommon_\sym:
@@ -1077,6 +1081,7 @@ apicinterrupt IRQ_WORK_VECTOR \
  */
 .macro zeroentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1094,6 +1099,7 @@ END(\sym)
 .macro paranoidzeroentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1112,6 +1118,7 @@ END(\sym)
 #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
+	ASM_CLAC
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
@@ -1131,6 +1138,7 @@ END(\sym)
 .macro errorentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	XCPT_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
@@ -1149,6 +1157,7 @@ END(\sym)
 	/* error code is on the stack already */
 .macro paranoiderrorentry sym do_sym
 ENTRY(\sym)
+	ASM_CLAC
 	XCPT_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	subq $ORIG_RAX-R15, %rsp
...
@@ -17,6 +17,7 @@
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 /*
  * By placing feature2 after feature1 in altinstructions section, we logically
@@ -130,6 +131,7 @@ ENDPROC(bad_from_user)
  */
 ENTRY(copy_user_generic_unrolled)
 	CFI_STARTPROC
+	ASM_STAC
 	cmpl $8,%edx
 	jb 20f		/* less then 8 bytes, go to byte copy loop */
 	ALIGN_DESTINATION
@@ -177,6 +179,7 @@ ENTRY(copy_user_generic_unrolled)
 	decl %ecx
 	jnz 21b
 23:	xor %eax,%eax
+	ASM_CLAC
 	ret
 	.section .fixup,"ax"
@@ -232,6 +235,7 @@ ENDPROC(copy_user_generic_unrolled)
  */
 ENTRY(copy_user_generic_string)
 	CFI_STARTPROC
+	ASM_STAC
 	andl %edx,%edx
 	jz 4f
 	cmpl $8,%edx
@@ -246,6 +250,7 @@ ENTRY(copy_user_generic_string)
 3:	rep
 	movsb
 4:	xorl %eax,%eax
+	ASM_CLAC
 	ret
 	.section .fixup,"ax"
@@ -273,12 +278,14 @@ ENDPROC(copy_user_generic_string)
  */
 ENTRY(copy_user_enhanced_fast_string)
 	CFI_STARTPROC
+	ASM_STAC
 	andl %edx,%edx
 	jz 2f
 	movl %edx,%ecx
 1:	rep
 	movsb
 2:	xorl %eax,%eax
+	ASM_CLAC
 	ret
 	.section .fixup,"ax"
...
@@ -15,6 +15,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 	.macro ALIGN_DESTINATION
 #ifdef FIX_ALIGNMENT
@@ -48,6 +49,7 @@
  */
 ENTRY(__copy_user_nocache)
 	CFI_STARTPROC
+	ASM_STAC
 	cmpl $8,%edx
 	jb 20f		/* less then 8 bytes, go to byte copy loop */
 	ALIGN_DESTINATION
@@ -95,6 +97,7 @@ ENTRY(__copy_user_nocache)
 	decl %ecx
 	jnz 21b
 23:	xorl %eax,%eax
+	ASM_CLAC
 	sfence
 	ret
...
@@ -33,6 +33,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 	.text
 ENTRY(__get_user_1)
@@ -40,8 +41,10 @@ ENTRY(__get_user_1)
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
+	ASM_STAC
 1:	movzb (%_ASM_AX),%edx
 	xor %eax,%eax
+	ASM_CLAC
 	ret
 	CFI_ENDPROC
 ENDPROC(__get_user_1)
@@ -53,8 +56,10 @@ ENTRY(__get_user_2)
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
+	ASM_STAC
 2:	movzwl -1(%_ASM_AX),%edx
 	xor %eax,%eax
+	ASM_CLAC
 	ret
 	CFI_ENDPROC
 ENDPROC(__get_user_2)
@@ -66,8 +71,10 @@ ENTRY(__get_user_4)
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
+	ASM_STAC
 3:	mov -3(%_ASM_AX),%edx
 	xor %eax,%eax
+	ASM_CLAC
 	ret
 	CFI_ENDPROC
 ENDPROC(__get_user_4)
@@ -80,8 +87,10 @@ ENTRY(__get_user_8)
 	GET_THREAD_INFO(%_ASM_DX)
 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
 	jae bad_get_user
+	ASM_STAC
 4:	movq -7(%_ASM_AX),%_ASM_DX
 	xor %eax,%eax
+	ASM_CLAC
 	ret
 	CFI_ENDPROC
 ENDPROC(__get_user_8)
@@ -91,6 +100,7 @@ bad_get_user:
 	CFI_STARTPROC
 	xor %edx,%edx
 	mov $(-EFAULT),%_ASM_AX
+	ASM_CLAC
 	ret
 	CFI_ENDPROC
 END(bad_get_user)
...
@@ -15,6 +15,7 @@
 #include <asm/thread_info.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
+#include <asm/smap.h>
 /*
@@ -31,7 +32,8 @@
 #define ENTER	CFI_STARTPROC ; \
 		GET_THREAD_INFO(%_ASM_BX)
-#define EXIT	ret ; \
+#define EXIT	ASM_CLAC ; \
+		ret ; \
 		CFI_ENDPROC
 .text
@@ -39,6 +41,7 @@ ENTRY(__put_user_1)
 	ENTER
 	cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
 	jae bad_put_user
+	ASM_STAC
 1:	movb %al,(%_ASM_CX)
 	xor %eax,%eax
 	EXIT
@@ -50,6 +53,7 @@ ENTRY(__put_user_2)
 	sub $1,%_ASM_BX
 	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
+	ASM_STAC
 2:	movw %ax,(%_ASM_CX)
 	xor %eax,%eax
 	EXIT
@@ -61,6 +65,7 @@ ENTRY(__put_user_4)
 	sub $3,%_ASM_BX
 	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
+	ASM_STAC
 3:	movl %eax,(%_ASM_CX)
 	xor %eax,%eax
 	EXIT
@@ -72,6 +77,7 @@ ENTRY(__put_user_8)
 	sub $7,%_ASM_BX
 	cmp %_ASM_BX,%_ASM_CX
 	jae bad_put_user
+	ASM_STAC
 4:	mov %_ASM_AX,(%_ASM_CX)
 #ifdef CONFIG_X86_32
 5:	movl %edx,4(%_ASM_CX)
...
@@ -42,10 +42,11 @@
 	int __d0; \
 	might_fault(); \
 	__asm__ __volatile__( \
+		ASM_STAC "\n" \
 		"0: rep; stosl\n" \
 		" movl %2,%0\n" \
 		"1: rep; stosb\n" \
-		"2:\n" \
+		"2: " ASM_CLAC "\n" \
 		".section .fixup,\"ax\"\n" \
 		"3: lea 0(%2,%0,4),%0\n" \
 		" jmp 2b\n" \
@@ -626,10 +627,12 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
 		return n;
 	}
 #endif
+	stac();
 	if (movsl_is_ok(to, from, n))
 		__copy_user(to, from, n);
 	else
 		n = __copy_user_intel(to, from, n);
+	clac();
 	return n;
 }
 EXPORT_SYMBOL(__copy_to_user_ll);
@@ -637,10 +640,12 @@ EXPORT_SYMBOL(__copy_to_user_ll);
 unsigned long __copy_from_user_ll(void *to, const void __user *from,
 					unsigned long n)
 {
+	stac();
 	if (movsl_is_ok(to, from, n))
 		__copy_user_zeroing(to, from, n);
 	else
 		n = __copy_user_zeroing_intel(to, from, n);
+	clac();
 	return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll);
@@ -648,11 +653,13 @@ EXPORT_SYMBOL(__copy_from_user_ll);
 unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
 					 unsigned long n)
 {
+	stac();
 	if (movsl_is_ok(to, from, n))
 		__copy_user(to, from, n);
 	else
 		n = __copy_user_intel((void __user *)to,
 				      (const void *)from, n);
+	clac();
 	return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
@@ -660,6 +667,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nozero);
 unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 					unsigned long n)
 {
+	stac();
 #ifdef CONFIG_X86_INTEL_USERCOPY
 	if (n > 64 && cpu_has_xmm2)
 		n = __copy_user_zeroing_intel_nocache(to, from, n);
@@ -668,6 +676,7 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
 #else
 	__copy_user_zeroing(to, from, n);
 #endif
+	clac();
 	return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nocache);
@@ -675,6 +684,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache);
 unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
 					unsigned long n)
 {
+	stac();
 #ifdef CONFIG_X86_INTEL_USERCOPY
 	if (n > 64 && cpu_has_xmm2)
 		n = __copy_user_intel_nocache(to, from, n);
@@ -683,6 +693,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
 #else
 	__copy_user(to, from, n);
 #endif
+	clac();
 	return n;
 }
 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
...
@@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
 	might_fault();
 	/* no memory constraint because it doesn't change any memory gcc knows
 	   about */
+	stac();
 	asm volatile(
 		"	testq  %[size8],%[size8]\n"
 		"	jz     4f\n"
@@ -40,6 +41,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
 		: [size8] "=&c"(size), [dst] "=&D" (__d0)
 		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
 		  [zero] "r" (0UL), [eight] "r" (8UL));
+	clac();
 	return size;
 }
 EXPORT_SYMBOL(__clear_user);
@@ -82,5 +84,6 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
 	for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
 		if (__put_user_nocheck(c, to++, sizeof(char)))
 			break;
+	clac();
 	return len;
 }