Commit 43a255c2 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Peter Anvin:
 "A couple of crash fixes, plus a fix that on 32 bits would cause a
  missing -ENOSYS for nonexistent system calls"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, cpu: Fix cache topology for early P4-SMT
  x86_32, entry: Store badsys error code in %eax
  x86, MCE: Robustify mcheck_init_device
@@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
 	 */
 	detect_extended_topology(c);
 
+	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+		/*
+		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+		 * detection.
+		 */
+		c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+		detect_ht(c);
+#endif
+	}
+
 	l2 = init_intel_cacheinfo(c);
 	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
@@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_P3);
 #endif
 
-	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-		/*
-		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-		 * detection.
-		 */
-		c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
-		detect_ht(c);
-#endif
-	}
-
 	/* Work around errata */
 	srat_detect_node(c);
...
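The two hunks above move the legacy topology probe in front of init_intel_cacheinfo(). For readers who want to see what the "legacy cpuid vector 0x1 and 0x4" in the moved comment actually report, here is a minimal user-space sketch (not kernel code; it assumes GCC/Clang's <cpuid.h> and an Intel CPU that implements leaf 4):

```c
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int htt, logical, cores;

	/* Leaf 0x1: HTT capability bit and logical-CPU count per package */
	__cpuid(1, eax, ebx, ecx, edx);
	htt     = (edx >> 28) & 1;
	logical = (ebx >> 16) & 0xff;

	/* Leaf 0x4 (subleaf 0): core count per physical package */
	__cpuid_count(4, 0, eax, ebx, ecx, edx);
	cores = ((eax >> 26) & 0x3f) + 1;

	printf("HTT=%u, logical CPUs per package=%u, cores per package=%u\n",
	       htt, logical, cores);
	return 0;
}
```

On an early Hyper-Threading Pentium 4 the deterministic cache leaf 0x4 is not implemented at all, which is exactly the case the intel_cacheinfo.c hunk below has to handle.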
@@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #endif
 	}
 
+#ifdef CONFIG_X86_HT
+	/*
+	 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
+	 * turns means that the only possibility is SMT (as indicated in
+	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
+	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
+	 * c->phys_proc_id.
+	 */
+	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
+		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+#endif
+
 	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
 	return l2;
...
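The comment in this hunk hinges on what a pre-leaf-4 CPU can and cannot describe. A hedged user-space illustration of that reasoning (again <cpuid.h>, illustrative only, not kernel code): if the highest standard CPUID leaf is below 4 there is no deterministic cache-parameters leaf, so the only sharing hint is the SMT information in leaf 1, and every sibling in the package can be assumed to share the last-level cache.

```c
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int max_leaf, eax, ebx, ecx, edx;

	__cpuid(0, max_leaf, ebx, ecx, edx);	/* highest standard CPUID leaf */
	__cpuid(1, eax, ebx, ecx, edx);

	if (max_leaf < 4) {
		/* No deterministic cache leaf: only the SMT hint from leaf 1 */
		if ((edx >> 28) & 1)
			printf("SMT-only topology: assume the LLC is shared "
			       "by every sibling in the package\n");
		else
			printf("no SMT and no leaf 4: one logical CPU per package\n");
	} else {
		printf("leaf 4 present: cache sharing is described per cache level\n");
	}
	return 0;
}
```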
@@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void)
 	for_each_online_cpu(i) {
 		err = mce_device_create(i);
 		if (err) {
+			/*
+			 * Register notifier anyway (and do not unreg it) so
+			 * that we don't leave undeleted timers, see notifier
+			 * callback above.
+			 */
+			__register_hotcpu_notifier(&mce_cpu_notifier);
 			cpu_notifier_register_done();
 			goto err_device_create;
 		}
@@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void)
 
 err_register:
 	unregister_syscore_ops(&mce_syscore_ops);
-	cpu_notifier_register_begin();
-	__unregister_hotcpu_notifier(&mce_cpu_notifier);
-	cpu_notifier_register_done();
-
 err_device_create:
 	/*
 	 * We didn't keep track of which devices were created above, but
...
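The comment added above states the intent: if mce_device_create() fails part-way through the loop, the hotplug notifier is registered anyway (and the unregister in the error path is dropped) so its callback can still delete the per-CPU timers that were already started. A generic, hypothetical user-space analogue of that pattern (names invented for illustration, not the mce.c code): register the teardown hook before bailing out, so whatever was created gets cleaned up instead of leaked.

```c
#include <stdio.h>
#include <stdlib.h>

#define NITEMS 4
static int created[NITEMS];		/* stands in for per-CPU timers/devices */

static void teardown_all(void)		/* stands in for the notifier callback */
{
	for (int i = 0; i < NITEMS; i++)
		if (created[i])
			printf("tearing down item %d\n", i);
}

static int create_item(int i)
{
	if (i == 2)
		return -1;		/* simulate mce_device_create() failing */
	created[i] = 1;
	return 0;
}

int main(void)
{
	for (int i = 0; i < NITEMS; i++) {
		if (create_item(i)) {
			/* register the cleanup hook anyway, then bail out */
			atexit(teardown_all);
			fprintf(stderr, "item %d failed, cleaning up the rest\n", i);
			return 1;
		}
	}
	atexit(teardown_all);		/* normal path registers it too */
	return 0;
}
```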
@@ -425,8 +425,8 @@ sysenter_do_call:
 	cmpl $(NR_syscalls), %eax
 	jae sysenter_badsys
 	call *sys_call_table(,%eax,4)
-	movl %eax,PT_EAX(%esp)
 sysenter_after_call:
+	movl %eax,PT_EAX(%esp)
 	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
@@ -502,6 +502,7 @@ ENTRY(system_call)
 	jae syscall_badsys
 syscall_call:
 	call *sys_call_table(,%eax,4)
+syscall_after_call:
 	movl %eax,PT_EAX(%esp)		# store the return value
 syscall_exit:
 	LOCKDEP_SYS_EXIT
@@ -675,12 +676,12 @@ syscall_fault:
 END(syscall_fault)
 
 syscall_badsys:
-	movl $-ENOSYS,PT_EAX(%esp)
-	jmp syscall_exit
+	movl $-ENOSYS,%eax
+	jmp syscall_after_call
 END(syscall_badsys)
 
 sysenter_badsys:
-	movl $-ENOSYS,PT_EAX(%esp)
+	movl $-ENOSYS,%eax
 	jmp sysenter_after_call
 END(syscall_badsys)
 	CFI_ENDPROC
...
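These hunks make the bad-syscall paths stash -ENOSYS in %eax and jump to the common *_after_call label, which stores %eax into PT_EAX on the stack. The user-visible expectation, per the pull message, is that a nonexistent syscall returns -1 with errno set to ENOSYS on 32-bit kernels as well. A minimal user-space check of that behaviour (illustrative only, not part of the commit):

```c
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

int main(void)
{
	errno = 0;
	/* deliberately out-of-range syscall number */
	long ret = syscall(100000);

	printf("ret=%ld errno=%d\n", ret, errno);
	return !(ret == -1 && errno == ENOSYS);
}
```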