Commit 7c17e486 authored by Linus Torvalds

Merge branch 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Peter Anvin.

This includes the resume-time FPU corruption fix from the chromeos guys,
marked for stable.

* 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, fpu: Avoid FPU lazy restore after suspend
  x86-32: Unbreak booting on some 486 clones
  x86, kvm: Remove incorrect redundant assembly constraint
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -399,14 +399,17 @@ static inline void drop_init_fpu(struct task_struct *tsk)
 typedef struct { int preload; } fpu_switch_t;
 
 /*
- * FIXME! We could do a totally lazy restore, but we need to
- * add a per-cpu "this was the task that last touched the FPU
- * on this CPU" variable, and the task needs to have a "I last
- * touched the FPU on this CPU" and check them.
+ * Must be run with preemption disabled: this clears the fpu_owner_task,
+ * on this CPU.
  *
- * We don't do that yet, so "fpu_lazy_restore()" always returns
- * false, but some day..
+ * This will disable any lazy FPU state restore of the current FPU state,
+ * but if the current thread owns the FPU, it will still be saved by.
  */
+static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+{
+        per_cpu(fpu_owner_task, cpu) = NULL;
+}
+
 static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
 {
         return new == this_cpu_read_stable(fpu_owner_task) &&
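For context on what the new helper defeats: fpu_lazy_restore() skips reloading the FPU registers when the incoming task was the last FPU user on this CPU. After suspend/resume (or CPU hotplug) the registers no longer hold that state even though the bookkeeping still matches, so the per-CPU owner has to be forgotten. A minimal user-space sketch of that bookkeeping, using made-up stand-ins (struct task, fpu_owner, lazy_restore_ok, disable_lazy_restore) rather than kernel code:

#include <stddef.h>
#include <stdio.h>

#define NR_CPUS 2

/* Toy stand-ins, not kernel types: one owner slot per CPU, like the
 * kernel's per-CPU fpu_owner_task, plus the task's last FPU CPU. */
struct task { int id; int last_cpu; };

static struct task *fpu_owner[NR_CPUS];

/* Mirrors the fpu_lazy_restore() test: reloading can be skipped only
 * if this task was the last FPU user on this CPU. */
static int lazy_restore_ok(struct task *t, int cpu)
{
        return fpu_owner[cpu] == t && t->last_cpu == cpu;
}

/* What __cpu_disable_lazy_restore() does: forget the owner, so the
 * next switch-in is forced to do a full restore. */
static void disable_lazy_restore(int cpu)
{
        fpu_owner[cpu] = NULL;
}

int main(void)
{
        struct task a = { .id = 1, .last_cpu = 0 };

        fpu_owner[0] = &a;      /* task A last used the FPU on CPU 0 */
        printf("before: lazy ok = %d\n", lazy_restore_ok(&a, 0));   /* 1 */

        disable_lazy_restore(0);        /* CPU 0 comes back with blank FPU state */
        printf("after:  lazy ok = %d\n", lazy_restore_ok(&a, 0));   /* 0 */
        return 0;
}

The smpboot.c hunk below calls the real helper at exactly that point, before a CPU is (re)booted, so a task that last ran on it cannot wrongly skip the restore.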
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -292,8 +292,8 @@ default_entry:
  * be using the global pages.
  *
  * NOTE! If we are on a 486 we may have no cr4 at all!
- * Specifically, cr4 exists if and only if CPUID exists,
- * which in turn exists if and only if EFLAGS.ID exists.
+ * Specifically, cr4 exists if and only if CPUID exists
+ * and has flags other than the FPU flag set.
  */
         movl $X86_EFLAGS_ID,%ecx
         pushl %ecx
@@ -308,6 +308,11 @@ default_entry:
         testl %ecx,%eax
         jz 6f                           # No ID flag = no CPUID = no CR4
 
+        movl $1,%eax
+        cpuid
+        andl $~1,%edx                   # Ignore CPUID.FPU
+        jz 6f                           # No flags or only CPUID.FPU = no CR4
+
         movl pa(mmu_cr4_features),%eax
         movl %eax,%cr4
 
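The added instructions run CPUID leaf 1 and mask off bit 0 of EDX (the FPU feature) before %cr4 is written: some 486 clones implement CPUID yet report no capability other than FPU and have no CR4, so writing the register would fault. A user-space sketch of the same test, assuming a GCC or Clang toolchain that provides <cpuid.h>; it mirrors the heuristic only and is not the boot code itself:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* CPUID leaf 1: feature flags come back in EDX (and ECX). */
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
                puts("no CPUID: assume no CR4");
                return 0;
        }
        /* Same test as "andl $~1,%edx; jz": ignore the FPU bit. */
        if ((edx & ~1u) == 0)
                puts("no flags or only CPUID.FPU: assume no CR4");
        else
                puts("other feature flags set: CR4 exists");
        return 0;
}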
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -68,6 +68,8 @@
 #include <asm/mwait.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
 #include <asm/setup.h>
 #include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>
@@ -818,6 +820,9 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
         per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
+        /* the FPU context is blank, nobody can own it */
+        __cpu_disable_lazy_restore(cpu);
+
         err = do_boot_cpu(apicid, cpu, tidle);
         if (err) {
                 pr_debug("do_boot_cpu failed %d\n", err);
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -426,8 +426,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "=m" ((ctxt)->eflags), "=&r" (_tmp),          \
                          "+a" (*rax), "+d" (*rdx), "+qm"(_ex)          \
-                       : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val),     \
-                         "a" (*rax), "d" (*rdx));                      \
+                       : "i" (EFLAGS_MASK), "m" ((ctxt)->src.val));    \
        } while (0)
 
 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
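The KVM fix relies on how GCC extended asm constraints compose: a "+" output constraint already declares the operand as both read and written, so listing the same lvalues again as plain "a"/"d" inputs is redundant, and some compilers reject or miscompile the duplicated form. A small standalone illustration of the read-write constraint (x86 target and GCC-style asm assumed, not kernel code):

#include <stdio.h>

static inline unsigned int double_in_eax(unsigned int x)
{
        /* "+a"(x) pins x to %eax for both input and output; no separate
         * "a"(x) input constraint is needed (or wanted). */
        asm("addl %%eax, %%eax" : "+a" (x));
        return x;
}

int main(void)
{
        printf("%u\n", double_in_eax(21));      /* prints 42 */
        return 0;
}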