提交 47edb651 编写于 作者: A Andy Lutomirski 提交者: Ingo Molnar

x86/asm/msr: Make wrmsrl() a function

As of cf991de2 ("x86/asm/msr: Make wrmsrl_safe() a
function"), wrmsrl_safe is a function, but wrmsrl is still a
macro.  The wrmsrl macro performs invalid shifts if the value
argument is 32 bits. This makes it unnecessarily awkward to
write code that puts an unsigned long into an MSR.

To make this work, syscall_init needs tweaking to stop passing
a function pointer to wrmsrl.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Willy Tarreau <w@1wt.eu>
Link: http://lkml.kernel.org/r/690f0c629a1085d054e2d1ef3da073cfb3f7db92.1437678821.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
上级 b466bdb6
@@ -188,8 +188,10 @@ static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
#define rdmsrl(msr, val) \
	((val) = native_read_msr((msr)))
#define wrmsrl(msr, val) \ static inline void wrmsrl(unsigned msr, u64 val)
native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32)) {
native_write_msr(msr, (u32)val, (u32)(val >> 32));
}
/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
......
@@ -153,7 +153,11 @@ do { \
	val = paravirt_read_msr(msr, &_err); \
} while (0)
#define wrmsrl(msr, val) wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32) static inline void wrmsrl(unsigned msr, u64 val)
{
wrmsr(msr, (u32)val, (u32)(val>>32));
}
#define wrmsr_safe(msr, a, b) paravirt_write_msr(msr, a, b)
/* rdmsr with exception handling */
......
@@ -1185,10 +1185,10 @@ void syscall_init(void)
* set CS/DS but only a 32bit target. LSTAR sets the 64bit rip. * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip.
*/ */
wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
wrmsrl(MSR_LSTAR, entry_SYSCALL_64); wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
#ifdef CONFIG_IA32_EMULATION #ifdef CONFIG_IA32_EMULATION
wrmsrl(MSR_CSTAR, entry_SYSCALL_compat); wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
/* /*
* This only works on Intel CPUs. * This only works on Intel CPUs.
* On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
@@ -1199,7 +1199,7 @@ void syscall_init(void)
wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else #else
wrmsrl(MSR_CSTAR, ignore_sysret); wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册