Commit cb715a83 authored by Linus Torvalds

Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpu updates from Peter Anvin:
 "This is a corrected attempt at the x86/cpu branch, this time with the
  fixes in that makes it not break on KVM (current or past), or any
  other virtualizer which traps on this configuration.

  Again, the biggest change here is enabling the WC+ memory type on AMD
  processors, if the BIOS doesn't."

* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, kvm: Add MSR_AMD64_BU_CFG2 to the list of ignored MSRs
  x86, cpu, amd: Fix WC+ workaround for older virtual hosts
  x86, AMD: Enable WC+ memory type on family 10 processors
  x86, AMD: Clean up init_amd()
  x86/process: Change %8s to %s for pr_warn() in release_thread()
  x86/cpu/hotplug: Remove CONFIG_EXPERIMENTAL dependency
@@ -1722,7 +1722,7 @@ config HOTPLUG_CPU
 config BOOTPARAM_HOTPLUG_CPU0
 	bool "Set default setting of cpu0_hotpluggable"
 	default n
-	depends on HOTPLUG_CPU && EXPERIMENTAL
+	depends on HOTPLUG_CPU
 	---help---
 	  Set whether default state of cpu0_hotpluggable is on or off.
@@ -1751,7 +1751,7 @@ config BOOTPARAM_HOTPLUG_CPU0
 config DEBUG_HOTPLUG_CPU0
 	def_bool n
 	prompt "Debug CPU0 hotplug"
-	depends on HOTPLUG_CPU && EXPERIMENTAL
+	depends on HOTPLUG_CPU
 	---help---
 	  Enabling this option offlines CPU0 (if CPU0 can be offlined) as
 	  soon as possible and boots up userspace with CPU0 offlined. User

...
@@ -175,6 +175,7 @@
 #define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
 #define MSR_AMD64_OSVW_STATUS		0xc0010141
 #define MSR_AMD64_DC_CFG		0xc0011022
+#define MSR_AMD64_BU_CFG2		0xc001102a
 #define MSR_AMD64_IBSFETCHCTL		0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD	0xc0011031
 #define MSR_AMD64_IBSFETCHPHYSAD	0xc0011032

...
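Not part of the patch, but useful context: once this define exists, the new MSR can be inspected from userspace on a family 10h machine via the msr character device. A minimal sketch (assuming root and a loaded msr module; bit 24 is the "WC+ disable" bit that the cpu patch below clears):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint64_t value;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		/* pread at offset == MSR number is how the msr device works */
		if (fd < 0 || pread(fd, &value, 8, 0xc001102a) != 8) {
			perror("rdmsr BU_CFG2");
			return 1;
		}
		printf("BU_CFG2 = %#llx, WC+ disable bit = %llu\n",
		       (unsigned long long)value,
		       (unsigned long long)((value >> 24) & 1));
		return 0;
	}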
@@ -518,10 +518,9 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
 	u32 dummy;
-#ifdef CONFIG_SMP
 	unsigned long long value;
+#ifdef CONFIG_SMP
 	/*
 	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
 	 * bit 6 of msr C001_0015
@@ -559,12 +558,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	 * (AMD Erratum #110, docId: 25759).
 	 */
 	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
-		u64 val;
-
 		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
-		if (!rdmsrl_amd_safe(0xc001100d, &val)) {
-			val &= ~(1ULL << 32);
-			wrmsrl_amd_safe(0xc001100d, val);
+		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
+			value &= ~(1ULL << 32);
+			wrmsrl_amd_safe(0xc001100d, value);
 		}
 	}
@@ -617,13 +614,12 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	if ((c->x86 == 0x15) &&
 	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
 	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
-		u64 val;
-		if (!rdmsrl_safe(0xc0011005, &val)) {
-			val |= 1ULL << 54;
-			wrmsrl_safe(0xc0011005, val);
-			rdmsrl(0xc0011005, val);
-			if (val & (1ULL << 54)) {
+		if (!rdmsrl_safe(0xc0011005, &value)) {
+			value |= 1ULL << 54;
+			wrmsrl_safe(0xc0011005, value);
+			rdmsrl(0xc0011005, value);
+			if (value & (1ULL << 54)) {
 				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
 				printk(KERN_INFO FW_INFO "CPU: Re-enabling "
 				       "disabled Topology Extensions Support\n");
@@ -637,11 +633,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	 */
 	if ((c->x86 == 0x15) &&
 	    (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
-		u64 val;
-		if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) {
-			val |= 0x1E;
-			wrmsrl_safe(0xc0011021, val);
+		if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
+			value |= 0x1E;
+			wrmsrl_safe(0xc0011021, value);
 		}
 	}
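The three hunks above repeat one idiom: read an MSR with a _safe accessor, twiddle a bit, write it back. A hedged sketch of a helper condensing that pattern (msr_rmw_safe is an invented name, not something this series adds; kernel context assumed):

	static int msr_rmw_safe(unsigned int msr, u64 clear, u64 set)
	{
		u64 value;

		/* _safe: a faulting access returns an error instead of #GP */
		if (rdmsrl_safe(msr, &value))
			return -EIO;
		value &= ~clear;
		value |= set;
		return wrmsrl_safe(msr, value);
	}

With such a helper, the erratum 746 fix above would reduce to roughly msr_rmw_safe(0xc0011021, 0, 0x1E) (modulo its "only if no bit is already set" check).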
@@ -703,13 +698,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	if (c->x86 > 0x11)
 		set_cpu_cap(c, X86_FEATURE_ARAT);
+	if (c->x86 == 0x10) {
 	/*
 	 * Disable GART TLB Walk Errors on Fam10h. We do this here
 	 * because this is always needed when GART is enabled, even in a
 	 * kernel which has no MCE support built in.
-	 */
-	if (c->x86 == 0x10) {
-	/*
 	 * BIOS should disable GartTlbWlk Errors themself. If
 	 * it doesn't do it here as suggested by the BKDG.
@@ -723,6 +716,21 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 			mask |= (1 << 10);
 			wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
 		}
+
+		/*
+		 * On family 10h BIOS may not have properly enabled WC+ support,
+		 * causing it to be converted to CD memtype. This may result in
+		 * performance degradation for certain nested-paging guests.
+		 * Prevent this conversion by clearing bit 24 in
+		 * MSR_AMD64_BU_CFG2.
+		 *
+		 * NOTE: we want to use the _safe accessors so as not to #GP kvm
+		 * guests on older kvm hosts.
+		 */
+		rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
+		value &= ~(1ULL << 24);
+		wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
 	}

 	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

...
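A gloss on the NOTE in the hunk above (my explanation, not patch text): a guest's WRMSR traps to the hypervisor, and a KVM host that predates the x86.c change below does not recognize MSR_AMD64_BU_CFG2, so it injects #GP back into the guest; with plain rdmsrl()/wrmsrl() that #GP would be fatal during early boot, while the _safe accessors recover through the kernel's exception tables and simply return non-zero. A slightly stricter variant of the patch's code, checking the read result explicitly:

	u64 value;

	if (!rdmsrl_safe(MSR_AMD64_BU_CFG2, &value)) {
		value &= ~(1ULL << 24);		/* clear the WC+ disable bit */
		wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
	}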
@@ -117,7 +117,7 @@ void release_thread(struct task_struct *dead_task)
 {
 	if (dead_task->mm) {
 		if (dead_task->mm->context.size) {
-			pr_warn("WARNING: dead process %8s still has LDT? <%p/%d>\n",
+			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
 				dead_task->comm,
 				dead_task->mm->context.ldt,
 				dead_task->mm->context.size);

...
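The %8s fix above is cosmetic but easy to misread: a width of 8 only left-pads names shorter than eight characters and is ignored for longer ones, so it added noise for a comm field that can hold up to 15 visible characters. A standalone C illustration:

	#include <stdio.h>

	int main(void)
	{
		printf("[%8s]\n", "sh");          /* prints [      sh] */
		printf("[%s]\n", "sh");           /* prints [sh] */
		printf("[%8s]\n", "kworker/0:1"); /* width ignored: [kworker/0:1] */
		return 0;
	}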
@@ -1881,6 +1881,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	u64 data = msr_info->data;

 	switch (msr) {
+	case MSR_AMD64_NB_CFG:
+	case MSR_IA32_UCODE_REV:
+	case MSR_IA32_UCODE_WRITE:
+	case MSR_VM_HSAVE_PA:
+	case MSR_AMD64_PATCH_LOADER:
+	case MSR_AMD64_BU_CFG2:
+		break;
+
 	case MSR_EFER:
 		return set_efer(vcpu, data);
 	case MSR_K7_HWCR:
@@ -1900,8 +1908,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		}
 		break;
-	case MSR_AMD64_NB_CFG:
-		break;
 	case MSR_IA32_DEBUGCTLMSR:
 		if (!data) {
 			/* We support the non-activated case already */
@@ -1914,11 +1920,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
 			    __func__, data);
 		break;
-	case MSR_IA32_UCODE_REV:
-	case MSR_IA32_UCODE_WRITE:
-	case MSR_VM_HSAVE_PA:
-	case MSR_AMD64_PATCH_LOADER:
-		break;
 	case 0x200 ... 0x2ff:
 		return set_msr_mtrr(vcpu, msr, data);
 	case MSR_IA32_APICBASE:
@@ -2253,6 +2254,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_K8_INT_PENDING_MSG:
 	case MSR_AMD64_NB_CFG:
 	case MSR_FAM10H_MMIO_CONF_BASE:
+	case MSR_AMD64_BU_CFG2:
 		data = 0;
 		break;
 	case MSR_P6_PERFCTR0:

...
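The net effect of the kvm hunks above, modeled as a toy dispatcher (illustrative only; the real function handles many more cases): the known-but-deliberately-ignored MSRs are now grouped at the top of the switch, MSR_AMD64_BU_CFG2 among them, so a guest write succeeds silently and a guest read returns 0 instead of raising #GP:

	#include <stdint.h>

	/* MSR numbers as defined in msr-index.h */
	static int toy_set_msr(uint32_t msr, uint64_t data)
	{
		(void)data;			/* written value is discarded */
		switch (msr) {
		case 0xc001001f:		/* MSR_AMD64_NB_CFG */
		case 0x0000008b:		/* MSR_IA32_UCODE_REV */
		case 0x00000079:		/* MSR_IA32_UCODE_WRITE */
		case 0xc0010117:		/* MSR_VM_HSAVE_PA */
		case 0xc0010020:		/* MSR_AMD64_PATCH_LOADER */
		case 0xc001102a:		/* MSR_AMD64_BU_CFG2 */
			return 0;		/* accepted and ignored */
		default:
			return 1;		/* guest would get #GP */
		}
	}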