Commit 26bfa5f8 authored by Borislav Petkov, committed by H. Peter Anvin

x86, amd: Cleanup init_amd

Distribute family-specific code to corresponding functions.

Also,

* move the direct mapping splitting around the TSEG SMM area to
bsp_init_amd().

* kill ancient comment about what we should do for K5.

* merge amd_k7_smp_check() into its only caller init_amd_k7 and drop
cpu_has_mp macro.
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: http://lkml.kernel.org/r/1403609105-8332-3-git-send-email-bp@alien8.de
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Parent 80a208bd
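Before the diff, here is a minimal, self-contained C sketch of the dispatch shape this cleanup produces: init_amd() does the family-independent setup and then hands off to one helper per CPU family. The struct and the printf stubs below are stand-ins for illustration only; the real helper bodies are in the diff that follows.

/*
 * Illustrative sketch only: mirrors the per-family dispatch this patch
 * introduces in init_amd(), with stand-in types and stub helpers.
 */
#include <stdio.h>

struct cpuinfo_x86 { unsigned int x86; };      /* stand-in: only the family field */

static void init_amd_k5(struct cpuinfo_x86 *c) { printf("0x%x: K5 quirks\n", c->x86); }
static void init_amd_k6(struct cpuinfo_x86 *c) { printf("0x%x: K6 quirks\n", c->x86); }
static void init_amd_k7(struct cpuinfo_x86 *c) { printf("0x%x: K7 quirks\n", c->x86); }
static void init_amd_k8(struct cpuinfo_x86 *c) { printf("0x%x: K8 quirks\n", c->x86); }
static void init_amd_gh(struct cpuinfo_x86 *c) { printf("0x%x: Fam10h quirks\n", c->x86); }
static void init_amd_bd(struct cpuinfo_x86 *c) { printf("0x%x: Fam15h quirks\n", c->x86); }

static void init_amd(struct cpuinfo_x86 *c)
{
        /* generic setup would run here; then hand off to the family helper */
        switch (c->x86) {
        case 4:    init_amd_k5(c); break;
        case 5:    init_amd_k6(c); break;
        case 6:    init_amd_k7(c); break;
        case 0xf:  init_amd_k8(c); break;
        case 0x10: init_amd_gh(c); break;
        case 0x15: init_amd_bd(c); break;
        }
}

int main(void)
{
        struct cpuinfo_x86 c = { .x86 = 0x15 };

        init_amd(&c);           /* prints "0x15: Fam15h quirks" */
        return 0;
}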
@@ -8,6 +8,7 @@
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
+#include <asm/smp.h>
 #include <asm/pci-direct.h>
 
 #ifdef CONFIG_X86_64
@@ -50,7 +51,6 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
         return wrmsr_safe_regs(gprs);
 }
 
-#ifdef CONFIG_X86_32
 /*
  * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
  * misexecution of code under Linux. Owners of such processors should
@@ -70,6 +70,7 @@ __asm__(".globl vide\n\t.align 4\nvide: ret");
 
 static void init_amd_k5(struct cpuinfo_x86 *c)
 {
+#ifdef CONFIG_X86_32
 /*
  * General Systems BIOSen alias the cpu frequency registers
  * of the Elan at 0x000df000. Unfortuantly, one of the Linux
@@ -83,11 +84,12 @@ static void init_amd_k5(struct cpuinfo_x86 *c)
                 if (inl(CBAR) & CBAR_ENB)
                         outl(0 | CBAR_KEY, CBAR);
         }
+#endif
 }
 
 static void init_amd_k6(struct cpuinfo_x86 *c)
 {
+#ifdef CONFIG_X86_32
         u32 l, h;
         int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
@@ -176,10 +178,44 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
                 /* placeholder for any needed mods */
                 return;
         }
+#endif
 }
 
-static void amd_k7_smp_check(struct cpuinfo_x86 *c)
+static void init_amd_k7(struct cpuinfo_x86 *c)
 {
+#ifdef CONFIG_X86_32
+        u32 l, h;
+
+        /*
+         * Bit 15 of Athlon specific MSR 15, needs to be 0
+         * to enable SSE on Palomino/Morgan/Barton CPU's.
+         * If the BIOS didn't enable it already, enable it here.
+         */
+        if (c->x86_model >= 6 && c->x86_model <= 10) {
+                if (!cpu_has(c, X86_FEATURE_XMM)) {
+                        printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
+                        msr_clear_bit(MSR_K7_HWCR, 15);
+                        set_cpu_cap(c, X86_FEATURE_XMM);
+                }
+        }
+
+        /*
+         * It's been determined by AMD that Athlons since model 8 stepping 1
+         * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
+         * As per AMD technical note 27212 0.2
+         */
+        if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
+                rdmsr(MSR_K7_CLK_CTL, l, h);
+                if ((l & 0xfff00000) != 0x20000000) {
+                        printk(KERN_INFO
+                            "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
+                                l, ((l & 0x000fffff)|0x20000000));
+                        wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
+                }
+        }
+
+        set_cpu_cap(c, X86_FEATURE_K7);
+
         /* calling is from identify_secondary_cpu() ? */
         if (!c->cpu_index)
                 return;
@@ -207,7 +243,7 @@ static void amd_k7_smp_check(struct cpuinfo_x86 *c)
         if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
             ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
              (c->x86_model > 7))
-                if (cpu_has_mp)
+                if (cpu_has(c, X86_FEATURE_MP))
                         return;
 
         /* If we get here, not a certified SMP capable AMD system. */
@@ -219,45 +255,8 @@ static void amd_k7_smp_check(struct cpuinfo_x86 *c)
         WARN_ONCE(1, "WARNING: This combination of AMD"
                  " processors is not suitable for SMP.\n");
         add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
-}
-
-static void init_amd_k7(struct cpuinfo_x86 *c)
-{
-        u32 l, h;
-
-        /*
-         * Bit 15 of Athlon specific MSR 15, needs to be 0
-         * to enable SSE on Palomino/Morgan/Barton CPU's.
-         * If the BIOS didn't enable it already, enable it here.
-         */
-        if (c->x86_model >= 6 && c->x86_model <= 10) {
-                if (!cpu_has(c, X86_FEATURE_XMM)) {
-                        printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
-                        msr_clear_bit(MSR_K7_HWCR, 15);
-                        set_cpu_cap(c, X86_FEATURE_XMM);
-                }
-        }
-
-        /*
-         * It's been determined by AMD that Athlons since model 8 stepping 1
-         * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
-         * As per AMD technical note 27212 0.2
-         */
-        if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
-                rdmsr(MSR_K7_CLK_CTL, l, h);
-                if ((l & 0xfff00000) != 0x20000000) {
-                        printk(KERN_INFO
-                            "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
-                                l, ((l & 0x000fffff)|0x20000000));
-                        wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
-                }
-        }
-
-        set_cpu_cap(c, X86_FEATURE_K7);
-
-        amd_k7_smp_check(c);
-}
 #endif
+}
 
 #ifdef CONFIG_NUMA
 /*
@@ -446,6 +445,26 @@ static void early_init_amd_mc(struct cpuinfo_x86 *c)
 
 static void bsp_init_amd(struct cpuinfo_x86 *c)
 {
+#ifdef CONFIG_X86_64
+        if (c->x86 >= 0xf) {
+                unsigned long long tseg;
+
+                /*
+                 * Split up direct mapping around the TSEG SMM area.
+                 * Don't do it for gbpages because there seems very little
+                 * benefit in doing so.
+                 */
+                if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
+                        unsigned long pfn = tseg >> PAGE_SHIFT;
+
+                        printk(KERN_DEBUG "tseg: %010llx\n", tseg);
+                        if (pfn_range_is_mapped(pfn, pfn + 1))
+                                set_memory_4k((unsigned long)__va(tseg), 1);
+                }
+        }
+#endif
+
         if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
 
                 if (c->x86 > 0x10 ||
@@ -515,101 +534,74 @@ static const int amd_erratum_383[];
 static const int amd_erratum_400[];
 static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
 
-static void init_amd(struct cpuinfo_x86 *c)
+static void init_amd_k8(struct cpuinfo_x86 *c)
 {
-        u32 dummy;
-        unsigned long long value;
-
-#ifdef CONFIG_SMP
-        /*
-         * Disable TLB flush filter by setting HWCR.FFDIS on K8
-         * bit 6 of msr C001_0015
-         *
-         * Errata 63 for SH-B3 steppings
-         * Errata 122 for all steppings (F+ have it disabled by default)
-         */
-        if (c->x86 == 0xf)
-                msr_set_bit(MSR_K7_HWCR, 6);
-#endif
-
-        early_init_amd(c);
-
-        /*
-         * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-         * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
-         */
-        clear_cpu_cap(c, 0*32+31);
-
-#ifdef CONFIG_X86_64
-        /* On C+ stepping K8 rep microcode works well for copy/memset */
-        if (c->x86 == 0xf) {
         u32 level;
+        u64 value;
 
+        /* On C+ stepping K8 rep microcode works well for copy/memset */
         level = cpuid_eax(1);
         if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
                 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 
         /*
-         * Some BIOSes incorrectly force this feature, but only K8
-         * revision D (model = 0x14) and later actually support it.
+         * Some BIOSes incorrectly force this feature, but only K8 revision D
+         * (model = 0x14) and later actually support it.
          * (AMD Erratum #110, docId: 25759).
          */
         if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
                 clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
                 if (!rdmsrl_amd_safe(0xc001100d, &value)) {
-                        value &= ~(1ULL << 32);
+                        value &= ~BIT_64(32);
                         wrmsrl_amd_safe(0xc001100d, value);
                 }
         }
 
-        }
-        if (c->x86 >= 0x10)
-                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-
-        /* get apicid instead of initial apic id from cpuid */
-        c->apicid = hard_smp_processor_id();
-#else
+        if (!c->x86_model_id[0])
+                strcpy(c->x86_model_id, "Hammer");
+}
+
+static void init_amd_gh(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_64
+        /* do this for boot cpu */
+        if (c == &boot_cpu_data)
+                check_enable_amd_mmconf_dmi();
+
+        fam10h_check_enable_mmcfg();
+#endif
 
         /*
-         *      FIXME: We should handle the K5 here. Set up the write
-         *      range and also turn on MSR 83 bits 4 and 31 (write alloc,
-         *      no bus pipeline)
+         * Disable GART TLB Walk Errors on Fam10h. We do this here because this
+         * is always needed when GART is enabled, even in a kernel which has no
+         * MCE support built in. BIOS should disable GartTlbWlk Errors already.
+         * If it doesn't, we do it here as suggested by the BKDG.
+         *
+         * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
          */
+        msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
 
-        switch (c->x86) {
-        case 4:
-                init_amd_k5(c);
-                break;
-        case 5:
-                init_amd_k6(c);
-                break;
-        case 6: /* An Athlon/Duron */
-                init_amd_k7(c);
-                break;
-        }
-
-        /* K6s reports MCEs but don't actually have all the MSRs */
-        if (c->x86 < 6)
-                clear_cpu_cap(c, X86_FEATURE_MCE);
-#endif
+        /*
+         * On family 10h BIOS may not have properly enabled WC+ support, causing
+         * it to be converted to CD memtype. This may result in performance
+         * degradation for certain nested-paging guests. Prevent this conversion
+         * by clearing bit 24 in MSR_AMD64_BU_CFG2.
+         *
+         * NOTE: we want to use the _safe accessors so as not to #GP kvm
+         * guests on older kvm hosts.
+         */
+        msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
 
-        /* Enable workaround for FXSAVE leak */
-        if (c->x86 >= 6)
-                set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
+        if (cpu_has_amd_erratum(c, amd_erratum_383))
+                set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
+}
 
-        if (!c->x86_model_id[0]) {
-                switch (c->x86) {
-                case 0xf:
-                        /* Should distinguish Models here, but this is only
-                           a fallback anyways. */
-                        strcpy(c->x86_model_id, "Hammer");
-                        break;
-                }
-        }
+static void init_amd_bd(struct cpuinfo_x86 *c)
+{
+        u64 value;
 
         /* re-enable TopologyExtensions if switched off by BIOS */
-        if ((c->x86 == 0x15) &&
-            (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+        if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
             !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 
                 if (msr_set_bit(0xc0011005, 54) > 0) {
@@ -625,14 +617,60 @@ static void init_amd(struct cpuinfo_x86 *c)
          * The way access filter has a performance penalty on some workloads.
          * Disable it on the affected CPUs.
          */
-        if ((c->x86 == 0x15) &&
-            (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
+        if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
                 if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
                         value |= 0x1E;
                         wrmsrl_safe(0xc0011021, value);
                 }
         }
+}
+
+static void init_amd(struct cpuinfo_x86 *c)
+{
+        u32 dummy;
+
+#ifdef CONFIG_SMP
+        /*
+         * Disable TLB flush filter by setting HWCR.FFDIS on K8
+         * bit 6 of msr C001_0015
+         *
+         * Errata 63 for SH-B3 steppings
+         * Errata 122 for all steppings (F+ have it disabled by default)
+         */
+        if (c->x86 == 0xf)
+                msr_set_bit(MSR_K7_HWCR, 6);
+#endif
+
+        early_init_amd(c);
+
+        /*
+         * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+         * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+         */
+        clear_cpu_cap(c, 0*32+31);
+
+        if (c->x86 >= 0x10)
+                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+
+        /* get apicid instead of initial apic id from cpuid */
+        c->apicid = hard_smp_processor_id();
+
+        /* K6s reports MCEs but don't actually have all the MSRs */
+        if (c->x86 < 6)
+                clear_cpu_cap(c, X86_FEATURE_MCE);
+
+        switch (c->x86) {
+        case 4:    init_amd_k5(c); break;
+        case 5:    init_amd_k6(c); break;
+        case 6:    init_amd_k7(c); break;
+        case 0xf:  init_amd_k8(c); break;
+        case 0x10: init_amd_gh(c); break;
+        case 0x15: init_amd_bd(c); break;
+        }
+
+        /* Enable workaround for FXSAVE leak */
+        if (c->x86 >= 6)
+                set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
 
         cpu_detect_cache_sizes(c);
@@ -656,33 +694,6 @@ static void init_amd(struct cpuinfo_x86 *c)
                 set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
         }
 
-#ifdef CONFIG_X86_64
-        if (c->x86 == 0x10) {
-                /* do this for boot cpu */
-                if (c == &boot_cpu_data)
-                        check_enable_amd_mmconf_dmi();
-
-                fam10h_check_enable_mmcfg();
-        }
-
-        if (c == &boot_cpu_data && c->x86 >= 0xf) {
-                unsigned long long tseg;
-
-                /*
-                 * Split up direct mapping around the TSEG SMM area.
-                 * Don't do it for gbpages because there seems very little
-                 * benefit in doing so.
-                 */
-                if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
-                        unsigned long pfn = tseg >> PAGE_SHIFT;
-
-                        printk(KERN_DEBUG "tseg: %010llx\n", tseg);
-                        if (pfn_range_is_mapped(pfn, pfn + 1))
-                                set_memory_4k((unsigned long)__va(tseg), 1);
-                }
-        }
-#endif
-
         /*
          * Family 0x12 and above processors have APIC timer
          * running in deep C states.
@@ -690,34 +701,6 @@ static void init_amd(struct cpuinfo_x86 *c)
         if (c->x86 > 0x11)
                 set_cpu_cap(c, X86_FEATURE_ARAT);
 
-        if (c->x86 == 0x10) {
-                /*
-                 * Disable GART TLB Walk Errors on Fam10h. We do this here
-                 * because this is always needed when GART is enabled, even in a
-                 * kernel which has no MCE support built in.
-                 * BIOS should disable GartTlbWlk Errors already. If
-                 * it doesn't, do it here as suggested by the BKDG.
-                 *
-                 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
-                 */
-                msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
-
-                /*
-                 * On family 10h BIOS may not have properly enabled WC+ support,
-                 * causing it to be converted to CD memtype. This may result in
-                 * performance degradation for certain nested-paging guests.
-                 * Prevent this conversion by clearing bit 24 in
-                 * MSR_AMD64_BU_CFG2.
-                 *
-                 * NOTE: we want to use the _safe accessors so as not to #GP kvm
-                 * guests on older kvm hosts.
-                 */
-                msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
-
-                if (cpu_has_amd_erratum(c, amd_erratum_383))
-                        set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
-        }
-
         if (cpu_has_amd_erratum(c, amd_erratum_400))
                 set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
......