Commit 1ef55be1 authored by Andy Lutomirski, committed by Thomas Gleixner

x86/asm: Get rid of __read_cr4_safe()

We use __read_cr4() vs __read_cr4_safe() inconsistently.  On
CR4-less CPUs, all CR4 bits are effectively clear, so we can make
the code simpler and more robust by making __read_cr4() always fix
up faults on 32-bit kernels.

This may fix some bugs on old 486-like CPUs, but I don't have any
easy way to test that.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: david@saggiorato.net
Link: http://lkml.kernel.org/r/ea647033d357d9ce2ad2bbde5a631045f5052fb6.1475178370.git.luto@kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent: d7e25c66
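For context, the pattern the patch folds into native_read_cr4() is the kernel's exception-table fixup: a CR4 read that may fault (e.g. on a 486-class CPU without CR4) gets an _ASM_EXTABLE entry, so the fault handler resumes after the mov and the preloaded value 0 is reported instead. Below is a minimal sketch of that pattern, not a drop-in copy of the hunks that follow; read_cr4_or_zero() is a hypothetical name for illustration, and the real code additionally ties the asm to __force_order for ordering of CR accesses.

#include <asm/asm.h>		/* _ASM_EXTABLE() */

static inline unsigned long read_cr4_or_zero(void)
{
	unsigned long val;

	asm volatile("1: mov %%cr4, %0\n"	/* may fault on CR4-less CPUs */
		     "2:\n"			/* fixup resumes here after a fault */
		     _ASM_EXTABLE(1b, 2b)	/* map a fault at 1b to a jump to 2b */
		     : "=r" (val)
		     : "0" (0));		/* preload operand 0 with 0, so a
						 * faulting read reports CR4 == 0 */
	return val;
}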
@@ -80,10 +80,6 @@ static inline unsigned long __read_cr4(void)
 {
 	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
 }
-static inline unsigned long __read_cr4_safe(void)
-{
-	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
-}
 static inline void __write_cr4(unsigned long x)
 {

@@ -108,7 +108,6 @@ struct pv_cpu_ops {
 	unsigned long (*read_cr0)(void);
 	void (*write_cr0)(unsigned long);
-	unsigned long (*read_cr4_safe)(void);
 	unsigned long (*read_cr4)(void);
 	void (*write_cr4)(unsigned long);

@@ -59,22 +59,19 @@ static inline void native_write_cr3(unsigned long val)
 static inline unsigned long native_read_cr4(void)
 {
 	unsigned long val;
-	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
-	return val;
-}
-static inline unsigned long native_read_cr4_safe(void)
-{
-	unsigned long val;
-	/* This could fault if %cr4 does not exist. In x86_64, a cr4 always
-	 * exists, so it will never fail. */
 #ifdef CONFIG_X86_32
+	/*
+	 * This could fault if CR4 does not exist.  Non-existent CR4
+	 * is functionally equivalent to CR4 == 0.  Keep it simple and pretend
+	 * that CR4 == 0 on CPUs that don't have CR4.
+	 */
 	asm volatile("1: mov %%cr4, %0\n"
 		     "2:\n"
 		     _ASM_EXTABLE(1b, 2b)
 		     : "=r" (val), "=m" (__force_order) : "0" (0));
 #else
-	val = native_read_cr4();
+	/* CR4 always exists on x86_64. */
+	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
 #endif
 	return val;
 }

@@ -182,11 +179,6 @@ static inline unsigned long __read_cr4(void)
 	return native_read_cr4();
 }
-static inline unsigned long __read_cr4_safe(void)
-{
-	return native_read_cr4_safe();
-}
 static inline void __write_cr4(unsigned long x)
 {
 	native_write_cr4(x);

@@ -81,7 +81,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
-	this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());
+	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
 }
 /* Set in this cpu's CR4. */

@@ -332,7 +332,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
 	.read_cr0 = native_read_cr0,
 	.write_cr0 = native_write_cr0,
 	.read_cr4 = native_read_cr4,
-	.read_cr4_safe = native_read_cr4_safe,
 	.write_cr4 = native_write_cr4,
 #ifdef CONFIG_X86_64
 	.read_cr8 = native_read_cr8,

@@ -90,7 +90,7 @@ void __show_regs(struct pt_regs *regs, int all)
 	cr0 = read_cr0();
 	cr2 = read_cr2();
 	cr3 = read_cr3();
-	cr4 = __read_cr4_safe();
+	cr4 = __read_cr4();
 	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
 		cr0, cr2, cr3, cr4);

@@ -1137,7 +1137,7 @@ void __init setup_arch(char **cmdline_p)
 	 * auditing all the early-boot CR4 manipulation would be needed to
 	 * rule it out.
 	 */
-	mmu_cr4_features = __read_cr4_safe();
+	mmu_cr4_features = __read_cr4();
 	memblock_set_current_limit(get_max_mapped());

@@ -130,7 +130,7 @@ static void __save_processor_state(struct saved_context *ctxt)
 	ctxt->cr0 = read_cr0();
 	ctxt->cr2 = read_cr2();
 	ctxt->cr3 = read_cr3();
-	ctxt->cr4 = __read_cr4_safe();
+	ctxt->cr4 = __read_cr4();
 #ifdef CONFIG_X86_64
 	ctxt->cr8 = read_cr8();
 #endif

@@ -1237,7 +1237,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.write_cr0 = xen_write_cr0,
 	.read_cr4 = native_read_cr4,
-	.read_cr4_safe = native_read_cr4_safe,
 	.write_cr4 = xen_write_cr4,
 #ifdef CONFIG_X86_64