diff --git a/MAINTAINERS b/MAINTAINERS
index 69dd1394718266d4052184b793205b455ca97eb7..d47de7aa94439f04b93d8a0032a25c6a46f15d53 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6787,6 +6787,12 @@ S:	Maintained
 F:	mm/memory-failure.c
 F:	mm/hwpoison-inject.c
 
+HYGON PROCESSOR SUPPORT
+M:	Pu Wen <puwen@hygon.cn>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+F:	arch/x86/kernel/cpu/hygon.c
+
 Hyper-V CORE AND DRIVERS
 M:	"K. Y. Srinivasan" <kys@microsoft.com>
 M:	Haiyang Zhang <haiyang@microsoft.com>
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 638411f22267aa34bd6ddda10eeccded1a1f6b48..6adce15268bd89ac70bcf529d736f895d6663085 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -426,6 +426,20 @@ config CPU_SUP_AMD
 
 	  If unsure, say N.
 
+config CPU_SUP_HYGON
+	default y
+	bool "Support Hygon processors" if PROCESSOR_SELECT
+	select CPU_SUP_AMD
+	help
+	  This enables detection, tunings and quirks for Hygon processors.
+
+	  You need this enabled if you want your kernel to run on a
+	  Hygon CPU. Disabling this option on other types of CPUs
+	  makes the kernel a tiny bit smaller. Disabling it on a Hygon
+	  CPU might render the kernel unbootable.
+
+	  If unsure, say N.
+
 config CPU_SUP_CENTAUR
 	default y
 	bool "Support Centaur processors" if PROCESSOR_SELECT
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index c84584bb940280b56f3b7d6d5365803ec4364505..7d2d7c801dba6abb226b630104d1f038242562cf 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -669,6 +669,10 @@ static int __init amd_core_pmu_init(void)
 		 * We fallback to using default amd_get_event_constraints.
 		 */
 		break;
+	case 0x18:
+		pr_cont("Fam18h ");
+		/* Using default amd_get_event_constraints. */
+		break;
 	default:
 		pr_err("core perfctr but no constraints; unknown hardware!\n");
 		return -ENODEV;
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 8671de126eac09e0a63358d72305ce0a5e9f4f31..398df6eaa1094b749cb38bcdda285712d314c9ff 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -515,17 +515,19 @@ static int __init amd_uncore_init(void)
 {
 	int ret = -ENODEV;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		return -ENODEV;
 
 	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
 		return -ENODEV;
 
-	if (boot_cpu_data.x86 == 0x17) {
+	if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
 		/*
-		 * For F17h, the Northbridge counters are repurposed as Data
-		 * Fabric counters. Also, L3 counters are supported too. The PMUs
-		 * are exported based on family as either L2 or L3 and NB or DF.
+		 * For F17h or F18h, the Northbridge counters are
+		 * repurposed as Data Fabric counters. Also, L3
+		 * counters are supported too. The PMUs are exported
+		 * based on family as either L2 or L3 and NB or DF.
 		 */
 		num_counters_nb = NUM_COUNTERS_NB;
 		num_counters_llc = NUM_COUNTERS_L3;
@@ -557,7 +559,9 @@ static int __init amd_uncore_init(void)
 		if (ret)
 			goto fail_nb;
 
-		pr_info("AMD NB counters detected\n");
+		pr_info("%s NB counters detected\n",
+			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
+			"HYGON" : "AMD");
 		ret = 0;
 	}
 
@@ -571,7 +575,9 @@ static int __init amd_uncore_init(void)
 		if (ret)
 			goto fail_llc;
 
-		pr_info("AMD LLC counters detected\n");
+		pr_info("%s LLC counters detected\n",
+			boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ?
+ "HYGON" : "AMD"); ret = 0; } diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index de32741d041a456923ea6c07b0b277f41446428a..106911b603bd95b355ddcf5d16028c4374c17035 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1797,6 +1797,10 @@ static int __init init_hw_perf_events(void) case X86_VENDOR_AMD: err = amd_pmu_init(); break; + case X86_VENDOR_HYGON: + err = amd_pmu_init(); + x86_pmu.name = "HYGON"; + break; default: err = -ENOTSUPP; } diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index fddb6d26239f59f5c679617f310f290a1161a9cb..1ae4e5791afafbe56c188cf14a3223e06f724869 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -103,6 +103,9 @@ static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev) static inline bool amd_gart_present(void) { + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + return false; + /* GART present only on Fam15h, upto model 0fh */ if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 || (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10)) diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h index e958e28f7ab5c8e865fe613ba55912d5acf8d1dc..86b63c7feab75d8111c9e194ad56a3069c471881 100644 --- a/arch/x86/include/asm/cacheinfo.h +++ b/arch/x86/include/asm/cacheinfo.h @@ -3,5 +3,6 @@ #define _ASM_X86_CACHEINFO_H void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id); +void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id); #endif /* _ASM_X86_CACHEINFO_H */ diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 0f82cd91cd3c4d630993d573d7c6e9970d476414..93c4bf598fb06c7e53865141dd3e7faa514194ff 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -364,6 +364,10 @@ struct x86_emulate_ctxt { #define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574 #define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273 +#define X86EMUL_CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 +#define X86EMUL_CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 +#define X86EMUL_CPUID_VENDOR_HygonGenuine_edx 0x6e65476e + #define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547 #define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e #define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69 diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 97d6969f9a8af5ba97fc843844ba05ac90f0b121..4da9b1c58d287bbdda427e31dd67a1653b519043 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -217,6 +217,8 @@ static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { } static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; }; #endif +static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); } + int mce_available(struct cpuinfo_x86 *c); bool mce_is_memory_error(struct mce *m); diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index d53c54b842daca1c847d8076aadd89564b23cf00..d4dfd02b740e881972bbc10c60bd7dd10f948177 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -155,7 +155,8 @@ enum cpuid_regs_idx { #define X86_VENDOR_CENTAUR 5 #define X86_VENDOR_TRANSMETA 7 #define X86_VENDOR_NSC 8 -#define X86_VENDOR_NUM 9 +#define X86_VENDOR_HYGON 9 +#define X86_VENDOR_NUM 10 #define X86_VENDOR_UNKNOWN 0xff diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h index 
index 0116b2ee9e64f34ebf612a0380410af39b4dd2e5..e05e0d3092445736ffd6a25ecbfddaa06168e7c6 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -83,9 +83,10 @@ static inline void cpu_emergency_vmxoff(void)
  */
 static inline int cpu_has_svm(const char **msg)
 {
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
 		if (msg)
-			*msg = "not amd";
+			*msg = "not amd or hygon";
 		return 0;
 	}
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index b9d5e7c9ef43e66c0b8d19fc0346be1779fa78d4..184e9a06b0ff2a5f28aeaf1b67b32c24bc738e71 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -222,6 +222,10 @@ void __init arch_init_ideal_nops(void)
 		}
 		break;
 
+	case X86_VENDOR_HYGON:
+		ideal_nops = p6_nops;
+		return;
+
 	case X86_VENDOR_AMD:
 		if (boot_cpu_data.x86 > 0xf) {
 			ideal_nops = p6_nops;
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index b481b95bd8f6b9e439c5d72e42af21b18d250af4..a6eca647bc76e33c8d9bc730ada8cb9d384dba97 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -61,6 +61,21 @@ static const struct pci_device_id amd_nb_link_ids[] = {
 	{}
 };
 
+static const struct pci_device_id hygon_root_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
+	{}
+};
+
+static const struct pci_device_id hygon_nb_misc_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+	{}
+};
+
+static const struct pci_device_id hygon_nb_link_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
+	{}
+};
+
 const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
 	{ 0x00, 0x18, 0x20 },
 	{ 0xff, 0x00, 0x20 },
@@ -194,15 +209,24 @@ EXPORT_SYMBOL_GPL(amd_df_indirect_read);
 
 int amd_cache_northbridges(void)
 {
-	u16 i = 0;
-	struct amd_northbridge *nb;
+	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
+	const struct pci_device_id *link_ids = amd_nb_link_ids;
+	const struct pci_device_id *root_ids = amd_root_ids;
 	struct pci_dev *root, *misc, *link;
+	struct amd_northbridge *nb;
+	u16 i = 0;
 
 	if (amd_northbridges.num)
 		return 0;
 
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		root_ids = hygon_root_ids;
+		misc_ids = hygon_nb_misc_ids;
+		link_ids = hygon_nb_link_ids;
+	}
+
 	misc = NULL;
-	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
+	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
 		i++;
 
 	if (!i)
@@ -218,11 +242,11 @@ int amd_cache_northbridges(void)
 	link = misc = root = NULL;
 	for (i = 0; i != amd_northbridges.num; i++) {
 		node_to_amd_nb(i)->root = root =
-			next_northbridge(root, amd_root_ids);
+			next_northbridge(root, root_ids);
 		node_to_amd_nb(i)->misc = misc =
-			next_northbridge(misc, amd_nb_misc_ids);
+			next_northbridge(misc, misc_ids);
 		node_to_amd_nb(i)->link = link =
-			next_northbridge(link, amd_nb_link_ids);
+			next_northbridge(link, link_ids);
 	}
 
 	if (amd_gart_present())
@@ -261,11 +285,19 @@ EXPORT_SYMBOL_GPL(amd_cache_northbridges);
  */
 bool __init early_is_amd_nb(u32 device)
 {
+	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
 	const struct pci_device_id *id;
 	u32 vendor = device & 0xffff;
 
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+		return false;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		misc_ids = hygon_nb_misc_ids;
+
 	device >>= 16;
-	for (id = amd_nb_misc_ids; id->vendor; id++)
+	for (id = misc_ids; id->vendor; id++)
 		if (vendor == id->vendor && device == id->device)
 			return true;
 	return false;
 
@@ -277,7 +309,8 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
 	u64 base, msr;
 	unsigned int segn_busn_bits;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		return NULL;
 
 	/* assume all cpus from fam10h have mmconfig */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 84132eddb5a858ff97b4c83a7ac6708eb49f4ab1..ab731ab09f06d1cff436f3b94cb363fb6c271e7b 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -224,6 +224,11 @@ static int modern_apic(void)
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
 	    boot_cpu_data.x86 >= 0xf)
 		return 1;
+
+	/* Hygon systems use modern APIC */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		return 1;
+
 	return lapic_get_version() >= 0x14;
 }
 
@@ -1912,6 +1917,8 @@ static int __init detect_init_APIC(void)
 		    (boot_cpu_data.x86 >= 15))
 			break;
 		goto no_apic;
+	case X86_VENDOR_HYGON:
+		break;
 	case X86_VENDOR_INTEL:
 		if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
 		    (boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)))
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 02e8acb134f856faa0030bf1c007dc3ac1bd9ca3..47ff2976c2927432047abbbe6e055975f6cc0ad7 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -185,6 +185,7 @@ void __init default_setup_apic_routing(void)
 			break;
 		}
 		/* If P4 and above fall through */
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		def_to_bigsmp = 1;
 	}
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 347137e80bf5ace65a2688a78897f6f704e96800..1f5d2291c31ec24a765bda0f4b0f0bcb7aae069d 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_X86_FEATURE_NAMES)	+= capflags.o powerflags.o
 
 obj-$(CONFIG_CPU_SUP_INTEL)		+= intel.o intel_pconfig.o
 obj-$(CONFIG_CPU_SUP_AMD)		+= amd.o
+obj-$(CONFIG_CPU_SUP_HYGON)		+= hygon.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
 obj-$(CONFIG_CPU_SUP_CENTAUR)		+= centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 40bdaea97fe7cac25f2b04bc239fdc520d68f09e..b810cc23937515430c8b24f51a4831051039e383 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -312,6 +312,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 	}
 
 	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
 	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
 		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
 		return SPECTRE_V2_CMD_AUTO;
 
@@ -371,7 +372,8 @@ static void __init spectre_v2_select_mitigation(void)
 	return;
 
 retpoline_auto:
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
 	retpoline_amd:
 		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
 			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 0c5fcbd998cf11badefad906a2122400a3512d58..dc1b9342e9c4f9ff7fbdd6a26309186b37e47ec0 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -602,6 +602,10 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
 		else
 			amd_cpuid4(index, &eax, &ebx, &ecx);
 		amd_init_l3_cache(this_leaf, index);
+	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		cpuid_count(0x8000001d, index, &eax.full,
+			    &ebx.full, &ecx.full, &edx);
+		amd_init_l3_cache(this_leaf, index);
 	} else {
 		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
 	}
@@ -625,7 +629,8 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
 	union _cpuid4_leaf_eax	cache_eax;
 	int			i = -1;
 
-	if (c->x86_vendor == X86_VENDOR_AMD)
+	if (c->x86_vendor == X86_VENDOR_AMD ||
+	    c->x86_vendor == X86_VENDOR_HYGON)
 		op = 0x8000001d;
 	else
 		op = 4;
@@ -678,6 +683,22 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
 	}
 }
 
+void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
+{
+	/*
+	 * We may have multiple LLCs if L3 caches exist, so check if we
+	 * have an L3 cache by looking at the L3 cache CPUID leaf.
+	 */
+	if (!cpuid_edx(0x80000006))
+		return;
+
+	/*
+	 * LLC is at the core complex level.
+	 * Core complex ID is ApicId[3] for these processors.
+	 */
+	per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+}
+
 void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 {
@@ -691,6 +712,11 @@ void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 	}
 }
 
+void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
+{
+	num_cache_leaves = find_num_cache_leaves(c);
+}
+
 void init_intel_cacheinfo(struct cpuinfo_x86 *c)
 {
 	/* Cache sizes */
@@ -913,7 +939,8 @@ static void __cache_cpumap_setup(unsigned int cpu, int index,
 	int index_msb, i;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if (c->x86_vendor == X86_VENDOR_AMD) {
+	if (c->x86_vendor == X86_VENDOR_AMD ||
+	    c->x86_vendor == X86_VENDOR_HYGON) {
 		if (__cache_amd_cpumap_setup(cpu, index, base))
 			return;
 	}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0b99a7fae6be8a7dae6f3c045f3b8f76abaccb86..c519a079b3d50b5ea118c183db01471bb94cf24f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -963,6 +963,7 @@ static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
 
 static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
 	{ X86_VENDOR_AMD },
+	{ X86_VENDOR_HYGON },
 	{}
 };
 
@@ -1076,6 +1077,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 	c->extended_cpuid_level = 0;
 
+	if (!have_cpuid_p())
+		identify_cpu_without_cpuid(c);
+
 	/* cyrix could have cpuid enabled via c_identify()*/
 	if (have_cpuid_p()) {
 		cpu_detect(c);
@@ -1093,7 +1097,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 		if (this_cpu->c_bsp_init)
 			this_cpu->c_bsp_init(c);
 	} else {
-		identify_cpu_without_cpuid(c);
 		setup_clear_cpu_cap(X86_FEATURE_CPUID);
 	}
 
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 7b229afa0a37a6a5bd6c910eaaa2e515fb562e82..da5446acc2411f82bacc22aea32798d71d1ead5a 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -54,6 +54,7 @@ extern u32 get_scattered_cpuid_leaf(unsigned int level,
 				    enum cpuid_regs_idx reg);
 extern void init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
+extern void init_hygon_cacheinfo(struct cpuinfo_x86 *c);
 
 extern void detect_num_cpu_cores(struct cpuinfo_x86 *c);
 extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 8949b7ae6d92536c1bbff659d7463588d2bdfb06..d12226f60168e1d844be6dded3c666509326e7a2 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -437,7 +437,7 @@ static void cyrix_identify(struct cpuinfo_x86 *c)
 		/* enable MAPEN  */
 		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
 		/* enable cpuid  */
-		setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80);
+		setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80);
 		/* disable MAPEN */
 		setCx86(CX86_CCR3, ccr3);
 		local_irq_restore(flags);
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
new file mode 100644
index 0000000000000000000000000000000000000000..cf25405444ab37814d11073ccc2e949327c2fe8b
--- /dev/null
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hygon Processor Support for Linux
+ *
+ * Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd.
+ *
+ * Author: Pu Wen <puwen@hygon.cn>
+ */
+#include <linux/io.h>
+
+#include <asm/cpu.h>
+#include <asm/smp.h>
+#include <asm/cacheinfo.h>
+#include <asm/spec-ctrl.h>
+#include <asm/delay.h>
+#ifdef CONFIG_X86_64
+# include <asm/set_memory.h>
+#endif
+
+#include "cpu.h"
+
+/*
+ * nodes_per_socket: Stores the number of nodes per socket.
+ * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
+ */
+static u32 nodes_per_socket = 1;
+
+#ifdef CONFIG_NUMA
+/*
+ * To workaround broken NUMA config.  Read the comment in
+ * srat_detect_node().
+ */
+static int nearby_node(int apicid)
+{
+	int i, node;
+
+	for (i = apicid - 1; i >= 0; i--) {
+		node = __apicid_to_node[i];
+		if (node != NUMA_NO_NODE && node_online(node))
+			return node;
+	}
+	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
+		node = __apicid_to_node[i];
+		if (node != NUMA_NO_NODE && node_online(node))
+			return node;
+	}
+	return first_node(node_online_map); /* Shouldn't happen */
+}
+#endif
+
+static void hygon_get_topology_early(struct cpuinfo_x86 *c)
+{
+	if (cpu_has(c, X86_FEATURE_TOPOEXT))
+		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
+}
+
+/*
+ * Fixup core topology information for
+ * (1) Hygon multi-node processors
+ *     Assumption: Number of cores in each internal node is the same.
+ * (2) Hygon processors supporting compute units
+ */
+static void hygon_get_topology(struct cpuinfo_x86 *c)
+{
+	u8 node_id;
+	int cpu = smp_processor_id();
+
+	/* get information required for multi-node processors */
+	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+		int err;
+		u32 eax, ebx, ecx, edx;
+
+		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+
+		node_id = ecx & 0xff;
+
+		c->cpu_core_id = ebx & 0xff;
+
+		if (smp_num_siblings > 1)
+			c->x86_max_cores /= smp_num_siblings;
+
+		/*
+		 * In case leaf B is available, use it to derive
+		 * topology information.
+		 */
+		err = detect_extended_topology(c);
+		if (!err)
+			c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+
+		cacheinfo_hygon_init_llc_id(c, cpu, node_id);
+	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+		u64 value;
+
+		rdmsrl(MSR_FAM10H_NODE_ID, value);
+		node_id = value & 7;
+
+		per_cpu(cpu_llc_id, cpu) = node_id;
+	} else
+		return;
+
+	if (nodes_per_socket > 1)
+		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
+}
+
+/*
+ * On Hygon setup the lower bits of the APIC id distinguish the cores.
+ * Assumes number of cores is a power of two.
+ */
+static void hygon_detect_cmp(struct cpuinfo_x86 *c)
+{
+	unsigned int bits;
+	int cpu = smp_processor_id();
+
+	bits = c->x86_coreid_bits;
+	/* Low order bits define the core id (index of core in socket) */
+	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
+	/* Convert the initial APIC ID into the socket ID */
+	c->phys_proc_id = c->initial_apicid >> bits;
+	/* use socket ID also for last level cache */
+	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+}
+
+static void srat_detect_node(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_NUMA
+	int cpu = smp_processor_id();
+	int node;
+	unsigned int apicid = c->apicid;
+
+	node = numa_cpu_node(cpu);
+	if (node == NUMA_NO_NODE)
+		node = per_cpu(cpu_llc_id, cpu);
+
+	/*
+	 * On multi-fabric platform (e.g. Numascale NumaChip) a
+	 * platform-specific handler needs to be called to fixup some
+	 * IDs of the CPU.
+	 */
+	if (x86_cpuinit.fixup_cpu_id)
+		x86_cpuinit.fixup_cpu_id(c, node);
+
+	if (!node_online(node)) {
+		/*
+		 * Two possibilities here:
+		 *
+		 * - The CPU is missing memory and no node was created.  In
+		 *   that case try picking one from a nearby CPU.
+		 *
+		 * - The APIC IDs differ from the HyperTransport node IDs.
+		 *   Assume they are all increased by a constant offset, but
+		 *   in the same order as the HT nodeids.  If that doesn't
+		 *   result in a usable node fall back to the path for the
+		 *   previous case.
+		 *
+		 * This workaround operates directly on the mapping between
+		 * APIC ID and NUMA node, assuming certain relationship
+		 * between APIC ID, HT node ID and NUMA topology.  As going
+		 * through CPU mapping may alter the outcome, directly
+		 * access __apicid_to_node[].
+		 */
+		int ht_nodeid = c->initial_apicid;
+
+		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+			node = __apicid_to_node[ht_nodeid];
+		/* Pick a nearby node */
+		if (!node_online(node))
+			node = nearby_node(apicid);
+	}
+	numa_set_node(cpu, node);
+#endif
+}
+
+static void early_init_hygon_mc(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+	unsigned int bits, ecx;
+
+	/* Multi core CPU? */
+	if (c->extended_cpuid_level < 0x80000008)
+		return;
+
+	ecx = cpuid_ecx(0x80000008);
+
+	c->x86_max_cores = (ecx & 0xff) + 1;
+
+	/* CPU telling us the core id bits shift? */
+	bits = (ecx >> 12) & 0xF;
+
+	/* Otherwise recompute */
+	if (bits == 0) {
+		while ((1 << bits) < c->x86_max_cores)
+			bits++;
+	}
+
+	c->x86_coreid_bits = bits;
+#endif
+}
+
+static void bsp_init_hygon(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_64
+	unsigned long long tseg;
+
+	/*
+	 * Split up direct mapping around the TSEG SMM area.
+	 * Don't do it for gbpages because there seems very little
+	 * benefit in doing so.
+	 */
+	if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
+		unsigned long pfn = tseg >> PAGE_SHIFT;
+
+		pr_debug("tseg: %010llx\n", tseg);
+		if (pfn_range_is_mapped(pfn, pfn + 1))
+			set_memory_4k((unsigned long)__va(tseg), 1);
+	}
+#endif
+
+	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
+		u64 val;
+
+		rdmsrl(MSR_K7_HWCR, val);
+		if (!(val & BIT(24)))
+			pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
+	}
+
+	if (cpu_has(c, X86_FEATURE_MWAITX))
+		use_mwaitx_delay();
+
+	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+		u32 ecx;
+
+		ecx = cpuid_ecx(0x8000001e);
+		nodes_per_socket = ((ecx >> 8) & 7) + 1;
+	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
+		u64 value;
+
+		rdmsrl(MSR_FAM10H_NODE_ID, value);
+		nodes_per_socket = ((value >> 3) & 7) + 1;
+	}
+
+	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
+	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+		/*
+		 * Try to cache the base value so further operations can
+		 * avoid RMW.  If that faults, do not enable SSBD.
+		 */
+		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
+			setup_force_cpu_cap(X86_FEATURE_SSBD);
+			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
+		}
+	}
+}
+
+static void early_init_hygon(struct cpuinfo_x86 *c)
+{
+	u32 dummy;
+
+	early_init_hygon_mc(c);
+
+	set_cpu_cap(c, X86_FEATURE_K8);
+
+	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
+
+	/*
+	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
+	 * with P/T states and does not stop in deep C-states
+	 */
+	if (c->x86_power & (1 << 8)) {
+		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+	}
+
+	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
+	if (c->x86_power & BIT(12))
+		set_cpu_cap(c, X86_FEATURE_ACC_POWER);
+
+#ifdef CONFIG_X86_64
+	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
+#endif
+
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
+	/*
+	 * ApicID can always be treated as an 8-bit value for Hygon APIC,
+	 * so we can safely set X86_FEATURE_EXTD_APICID unconditionally.
+	 */
+	if (boot_cpu_has(X86_FEATURE_APIC))
+		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
+#endif
+
+	/*
+	 * This is only needed to tell the kernel whether to use VMCALL
+	 * and VMMCALL.  VMMCALL is never executed except under virt, so
+	 * we can set it unconditionally.
+	 */
+	set_cpu_cap(c, X86_FEATURE_VMMCALL);
+
+	hygon_get_topology_early(c);
+}
+
+static void init_hygon(struct cpuinfo_x86 *c)
+{
+	early_init_hygon(c);
+
+	/*
+	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
+	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
+	 */
+	clear_cpu_cap(c, 0*32+31);
+
+	set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+
+	/* get apicid instead of initial apic id from cpuid */
+	c->apicid = hard_smp_processor_id();
+
+	set_cpu_cap(c, X86_FEATURE_ZEN);
+	set_cpu_cap(c, X86_FEATURE_CPB);
+
+	cpu_detect_cache_sizes(c);
+
+	hygon_detect_cmp(c);
+	hygon_get_topology(c);
+	srat_detect_node(c);
+
+	init_hygon_cacheinfo(c);
+
+	if (cpu_has(c, X86_FEATURE_XMM2)) {
+		unsigned long long val;
+		int ret;
+
+		/*
+		 * A serializing LFENCE has less overhead than MFENCE, so
+		 * use it for execution serialization.  On families which
+		 * don't have that MSR, LFENCE is already serializing.
+		 * msr_set_bit() uses the safe accessors, too, even if the MSR
+		 * is not present.
+		 */
+		msr_set_bit(MSR_F10H_DECFG,
+			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+
+		/*
+		 * Verify that the MSR write was successful (could be running
+		 * under a hypervisor) and only then assume that LFENCE is
+		 * serializing.
+		 */
+		ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
+		if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
+			/* A serializing LFENCE stops RDTSC speculation */
+			set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+		} else {
+			/* MFENCE stops RDTSC speculation */
+			set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+		}
+	}
+
+	/*
+	 * Hygon processors have APIC timer running in deep C states.
+	 */
+	set_cpu_cap(c, X86_FEATURE_ARAT);
+
+	/* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */
+	if (!cpu_has(c, X86_FEATURE_XENPV))
+		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+}
+
+static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
+{
+	u32 ebx, eax, ecx, edx;
+	u16 mask = 0xfff;
+
+	if (c->extended_cpuid_level < 0x80000006)
+		return;
+
+	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
+
+	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
+	tlb_lli_4k[ENTRIES] = ebx & mask;
+
+	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
+	if (!((eax >> 16) & mask))
+		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
+	else
+		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
+
+	/* a 4M entry uses two 2M entries */
+	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
+
+	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
+	if (!(eax & mask)) {
+		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
+		tlb_lli_2m[ENTRIES] = eax & 0xff;
+	} else
+		tlb_lli_2m[ENTRIES] = eax & mask;
+
+	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
+}
+
+static const struct cpu_dev hygon_cpu_dev = {
+	.c_vendor	= "Hygon",
+	.c_ident	= { "HygonGenuine" },
+	.c_early_init	= early_init_hygon,
+	.c_detect_tlb	= cpu_detect_tlb_hygon,
+	.c_bsp_init	= bsp_init_hygon,
+	.c_init		= init_hygon,
+	.c_x86_vendor	= X86_VENDOR_HYGON,
+};
+
+cpu_dev_register(hygon_cpu_dev);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index f34d89c01edc5c761e0df331da1331f8a0f98f3a..44396d52198719df0b0ad7652ba009409ff19037 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -336,7 +336,8 @@ int (*mce_severity)(struct mce *m, int tolerant, char **msg, bool is_excp) =
 
 void __init mcheck_vendor_init_severity(void)
 {
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
 		mce_severity = mce_severity_amd;
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index ef8fd1f2ede0495f63689a3889979d1f8b78dd94..8cb3c02980cfa72f9d6c810f84f080565c296400 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -270,7 +270,7 @@ static void print_mce(struct mce *m)
 {
 	__print_mce(m);
 
-	if (m->cpuvendor != X86_VENDOR_AMD)
+	if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
 		pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 }
 
@@ -508,9 +508,9 @@ static int mce_usable_address(struct mce *m)
 
 bool mce_is_memory_error(struct mce *m)
 {
-	if (m->cpuvendor == X86_VENDOR_AMD) {
+	if (m->cpuvendor == X86_VENDOR_AMD ||
+	    m->cpuvendor == X86_VENDOR_HYGON) {
 		return amd_mce_is_memory_error(m);
-
 	} else if (m->cpuvendor == X86_VENDOR_INTEL) {
 		/*
 		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
@@ -539,6 +539,9 @@ static bool mce_is_correctable(struct mce *m)
 	if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
 		return false;
 
+	if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
+		return false;
+
 	if (m->status & MCI_STATUS_UC)
 		return false;
 
@@ -1705,7 +1708,7 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
  */
 static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
 {
-	if (c->x86_vendor == X86_VENDOR_AMD) {
+	if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
 		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
 		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
 		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);
@@ -1746,6 +1749,11 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 		mce_amd_feature_init(c);
 		break;
 		}
+
+	case X86_VENDOR_HYGON:
+		mce_hygon_feature_init(c);
+		break;
+
 	case X86_VENDOR_CENTAUR:
 		mce_centaur_feature_init(c);
 		break;
@@ -1971,12 +1979,14 @@ static void mce_disable_error_reporting(void)
 static void vendor_disable_error_reporting(void)
 {
 	/*
-	 * Don't clear on Intel or AMD CPUs. Some of these MSRs are socket-wide.
+	 * Don't clear on Intel or AMD or Hygon CPUs. Some of these MSRs
+	 * are socket-wide.
 	 * Disabling them for just a single offlined CPU is bad, since it will
 	 * inhibit reporting for all shared resources on the socket like the
 	 * last level cache (LLC), the integrated memory controller (iMC), etc.
 	 */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
 	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
 		return;
 
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 765afd5990398c9ef687d1cfe8b3b97b6fc46335..3668c5df90c6997737ddaf5de028e51d943080aa 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -831,7 +831,8 @@ int __init amd_special_default_mtrr(void)
 {
 	u32 l, h;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		return 0;
 	if (boot_cpu_data.x86 < 0xf)
 		return 0;
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 9a19c800fe40095f1c706e4a50d5378339d429b8..507039c20128a6870e1e660d5f5bb3b470cfb18f 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -127,7 +127,7 @@ static void __init set_num_var_ranges(void)
 
 	if (use_intel())
 		rdmsr(MSR_MTRRcap, config, dummy);
-	else if (is_cpu(AMD))
+	else if (is_cpu(AMD) || is_cpu(HYGON))
 		config = 2;
 	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
 		config = 8;
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index d389083330c50f8efb57a436a6af6773365ae00e..9556930cd8c1a4248db2937d63e82cfaec18a1f2 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -46,6 +46,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
 {
 	/* returns the bit offset of the performance counter register */
 	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		if (msr >= MSR_F15H_PERF_CTR)
 			return (msr - MSR_F15H_PERF_CTR) >> 1;
@@ -74,6 +75,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
 {
 	/* returns the bit offset of the event selection register */
 	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		if (msr >= MSR_F15H_PERF_CTL)
 			return (msr - MSR_F15H_PERF_CTL) >> 1;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f02ecaf97904bd2a1822cb242539ae3eeeefee20..5369d7fac7978b9216c43c88931ab66e41a6fac6 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -676,6 +676,7 @@ static void __init smp_quirk_init_udelay(void)
 	/* if modern processor, use no delay */
 	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
+	    ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
 	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
 		init_udelay = 0;
 		return;
 	}
@@ -1592,7 +1593,8 @@ static inline void mwait_play_dead(void)
 	void *mwait_ptr;
 	int i;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
+	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
 		return;
 	if (!this_cpu_has(X86_FEATURE_MWAIT))
 		return;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 106482da6388e29cec054aa72dc5872708d9cd4a..34edf198708f76883d5fa099bfe3683dbf52c753 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2711,7 +2711,16 @@ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
 	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
 		return true;
 
-	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
+	/* Hygon ("HygonGenuine") */
+	if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
+	    ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
+	    edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
+		return true;
+
+	/*
+	 * default: (not Intel, not AMD, not Hygon), apply Intel's
+	 * stricter rules...
+	 */
 	return false;
 }
 
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 649bdde63e328b223ff8371057e9052371ddb7e3..bfa50e65ef6c380557d576681a8cf6cede4db8db 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -93,7 +93,8 @@ static int __init early_root_info_init(void)
 		vendor = id & 0xffff;
 		device = (id>>16) & 0xffff;
 
-		if (vendor != PCI_VENDOR_ID_AMD)
+		if (vendor != PCI_VENDOR_ID_AMD &&
+		    vendor != PCI_VENDOR_ID_HYGON)
 			continue;
 
 		if (hb_probes[i].device == device) {
@@ -390,7 +391,8 @@ static int __init pci_io_ecs_init(void)
 
 static int __init amd_postcore_init(void)
 {
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 		return 0;
 
 	early_root_info_init();
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 0972184f3f199272d534c1f5d2cad46093fdce54..e13b0b49fcdfc181c19f76de1c968b9a344e8a70 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -91,6 +91,12 @@ static void xen_pmu_arch_init(void)
 			k7_counters_mirrored = 0;
 			break;
 		}
+	} else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		amd_num_counters = F10H_NUM_COUNTERS;
+		amd_counters_base = MSR_K7_PERFCTR0;
+		amd_ctrls_base = MSR_K7_EVNTSEL0;
+		amd_msr_step = 1;
+		k7_counters_mirrored = 0;
 	} else {
 		uint32_t eax, ebx, ecx, edx;
 
@@ -286,7 +292,7 @@ static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
 
 bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
 {
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
 		if (is_amd_pmu_msr(msr)) {
 			if (!xen_amd_pmu_emulate(msr, val, 1))
 				*val = native_read_msr_safe(msr, err);
@@ -309,7 +315,7 @@ bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
 {
 	uint64_t val = ((uint64_t)high << 32) | low;
 
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
 		if (is_amd_pmu_msr(msr)) {
 			if (!xen_amd_pmu_emulate(msr, &val, 0))
 				*err = native_write_msr_safe(msr, low, high);
@@ -380,7 +386,7 @@ static unsigned long long xen_intel_read_pmc(int counter)
 
 unsigned long long xen_read_pmc(int counter)
 {
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 		return xen_amd_read_pmc(counter);
 	else
 		return xen_intel_read_pmc(counter);
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 552c1f725b6cf5ab8d4a86ba556c37a053d91b1d..a47676a55b840992769c84baf30ebb51a64794fc 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -70,6 +70,7 @@ static void power_saving_mwait_init(void)
 
 #if defined(CONFIG_X86)
 	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 	case X86_VENDOR_INTEL:
 		/*
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index abb559cd28d793d052b6408e606469538e80c65b..b2131c4ea1245c43f00b63c20ae4703a3603f14a 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -205,6 +205,7 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 static void tsc_check_state(int state)
 {
 	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 	case X86_VENDOR_INTEL:
 	case X86_VENDOR_CENTAUR:
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index b61f4ec43e0685f6b1ea28035cff16a19a8247d9..d62fd374d5c70f58fdd4fb7c874584a00dc00c16 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -61,6 +61,7 @@ enum {
 
 #define INTEL_MSR_RANGE		(0xffff)
 #define AMD_MSR_RANGE		(0x7)
+#define HYGON_MSR_RANGE		(0x7)
 
 #define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
 
@@ -95,6 +96,7 @@ static bool boost_state(unsigned int cpu)
 		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
 		msr = lo | ((u64)hi << 32);
 		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
 		msr = lo | ((u64)hi << 32);
@@ -113,6 +115,7 @@ static int boost_set_msr(bool enable)
 		msr_addr = MSR_IA32_MISC_ENABLE;
 		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
 		break;
+	case X86_VENDOR_HYGON:
 	case X86_VENDOR_AMD:
 		msr_addr = MSR_K7_HWCR;
 		msr_mask = MSR_K7_HWCR_CPB_DIS;
@@ -225,6 +228,8 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
 		msr &= AMD_MSR_RANGE;
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		msr &= HYGON_MSR_RANGE;
 	else
 		msr &= INTEL_MSR_RANGE;
 
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index be926d9a66e574604758fb40300504bec9ca3039..4ac7c3cf34bef93ab2070d1c5f69a7c3a8b0ec25 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -111,11 +111,16 @@ static int __init amd_freq_sensitivity_init(void)
 {
 	u64 val;
 	struct pci_dev *pcidev;
+	unsigned int pci_vendor;
 
-	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		pci_vendor = PCI_VENDOR_ID_AMD;
+	else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		pci_vendor = PCI_VENDOR_ID_HYGON;
+	else
 		return -ENODEV;
 
-	pcidev = pci_get_device(PCI_VENDOR_ID_AMD,
+	pcidev = pci_get_device(pci_vendor,
 			PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
 
 	if (!pcidev) {
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index d4afd8086ed90a09f3df691f8e083ef02d901ff0..8ddc2ffe38951035700b42b5bdc16fd4522c5a7e 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2565,6 +2565,8 @@
 
 #define PCI_VENDOR_ID_AMAZON		0x1d0f
 
+#define PCI_VENDOR_ID_HYGON		0x1d94
+
 #define PCI_VENDOR_ID_TEKRAM		0x1de1
 #define PCI_DEVICE_ID_TEKRAM_DC290	0xdc29
 
diff --git a/tools/power/cpupower/utils/cpufreq-info.c b/tools/power/cpupower/utils/cpufreq-info.c
index ccd08dd009962de64d09ce1ceacf38ca9bfed6c2..c3f39d5128ee4e8445cb0921f1485224fc252914 100644
--- a/tools/power/cpupower/utils/cpufreq-info.c
+++ b/tools/power/cpupower/utils/cpufreq-info.c
@@ -170,6 +170,7 @@ static int get_boost_mode(unsigned int cpu)
 	unsigned long pstates[MAX_HW_PSTATES] = {0,};
 
 	if (cpupower_cpu_info.vendor != X86_VENDOR_AMD &&
+	    cpupower_cpu_info.vendor != X86_VENDOR_HYGON &&
 	    cpupower_cpu_info.vendor != X86_VENDOR_INTEL)
 		return 0;
 
@@ -190,8 +191,9 @@ static int get_boost_mode(unsigned int cpu)
 	printf(_("    Supported: %s\n"), support ? _("yes") : _("no"));
 	printf(_("    Active: %s\n"), active ? _("yes") : _("no"));
 
-	if (cpupower_cpu_info.vendor == X86_VENDOR_AMD &&
-	    cpupower_cpu_info.family >= 0x10) {
+	if ((cpupower_cpu_info.vendor == X86_VENDOR_AMD &&
+	     cpupower_cpu_info.family >= 0x10) ||
+	    cpupower_cpu_info.vendor == X86_VENDOR_HYGON) {
 		ret = decode_pstates(cpu, cpupower_cpu_info.family, b_states,
 				     pstates, &pstate_no);
 		if (ret)
diff --git a/tools/power/cpupower/utils/helpers/amd.c b/tools/power/cpupower/utils/helpers/amd.c
index 9607ada5b29a6a5e50825c267aadbe441c3cc426..7c4f83a8c97371a5e073d969238ad491ea09fc62 100644
--- a/tools/power/cpupower/utils/helpers/amd.c
+++ b/tools/power/cpupower/utils/helpers/amd.c
@@ -45,7 +45,7 @@ static int get_did(int family, union msr_pstate pstate)
 
 	if (family == 0x12)
 		t = pstate.val & 0xf;
-	else if (family == 0x17)
+	else if (family == 0x17 || family == 0x18)
 		t = pstate.fam17h_bits.did;
 	else
 		t = pstate.bits.did;
@@ -59,7 +59,7 @@ static int get_cof(int family, union msr_pstate pstate)
 	int fid, did, cof;
 
 	did = get_did(family, pstate);
-	if (family == 0x17) {
+	if (family == 0x17 || family == 0x18) {
 		fid = pstate.fam17h_bits.fid;
 		cof = 200 * fid / did;
 	} else {
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c
index 732b0b41ba261664eefb9e862f7a7174bcc9fca0..5cc39d4e23edb29871d72aac9f2361a90f4de0d2 100644
--- a/tools/power/cpupower/utils/helpers/cpuid.c
+++ b/tools/power/cpupower/utils/helpers/cpuid.c
@@ -8,7 +8,7 @@
 #include "helpers/helpers.h"
 
 static const char *cpu_vendor_table[X86_VENDOR_MAX] = {
-	"Unknown", "GenuineIntel", "AuthenticAMD",
+	"Unknown", "GenuineIntel", "AuthenticAMD", "HygonGenuine",
 };
 
 #if defined(__i386__) || defined(__x86_64__)
@@ -109,6 +109,7 @@ int get_cpu_info(struct cpupower_cpu_info *cpu_info)
 	fclose(fp);
 	/* Get some useful CPU capabilities from cpuid */
 	if (cpu_info->vendor != X86_VENDOR_AMD &&
+	    cpu_info->vendor != X86_VENDOR_HYGON &&
 	    cpu_info->vendor != X86_VENDOR_INTEL)
 		return ret;
 
@@ -124,8 +125,9 @@ int get_cpu_info(struct cpupower_cpu_info *cpu_info)
 	if (cpuid_level >= 6 && (cpuid_ecx(6) & 0x1))
 		cpu_info->caps |= CPUPOWER_CAP_APERF;
 
-	/* AMD Boost state enable/disable register */
-	if (cpu_info->vendor == X86_VENDOR_AMD) {
+	/* AMD or Hygon Boost state enable/disable register */
+	if (cpu_info->vendor == X86_VENDOR_AMD ||
+	    cpu_info->vendor == X86_VENDOR_HYGON) {
 		if (ext_cpuid_level >= 0x80000007 &&
 		    (cpuid_edx(0x80000007) & (1 << 9)))
 			cpu_info->caps |= CPUPOWER_CAP_AMD_CBP;
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h
index 41da392be448a9abc63ec9032ced298095416bd6..902139689315ef290907d0aa6c30f8bcd30ba5ff 100644
--- a/tools/power/cpupower/utils/helpers/helpers.h
+++ b/tools/power/cpupower/utils/helpers/helpers.h
@@ -61,7 +61,7 @@ extern int be_verbose;
 /* cpuid and cpuinfo helpers  **************************/
 
 enum cpupower_cpu_vendor {X86_VENDOR_UNKNOWN = 0, X86_VENDOR_INTEL,
-			  X86_VENDOR_AMD, X86_VENDOR_MAX};
+			  X86_VENDOR_AMD, X86_VENDOR_HYGON, X86_VENDOR_MAX};
 
 #define CPUPOWER_CAP_INV_TSC		0x00000001
 #define CPUPOWER_CAP_APERF		0x00000002
diff --git a/tools/power/cpupower/utils/helpers/misc.c b/tools/power/cpupower/utils/helpers/misc.c
index 80fdf55f414dce474a87a38606bb83d3d816aaef..f406adc40bad5189529817a4fbb8b8981a5ef4c8 100644
--- a/tools/power/cpupower/utils/helpers/misc.c
+++ b/tools/power/cpupower/utils/helpers/misc.c
@@ -26,7 +26,7 @@ int cpufreq_has_boost_support(unsigned int cpu, int *support, int *active,
 	 * has Hardware determined variable increments instead.
 	 */
 
-	if (cpu_info.family == 0x17) {
+	if (cpu_info.family == 0x17 || cpu_info.family == 0x18) {
 		if (!read_msr(cpu, MSR_AMD_HWCR, &val)) {
 			if (!(val & CPUPOWER_AMD_CPBDIS))
 				*active = 1;
diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
index d7c2a6d13dea1a1d4734fb51842eb12b86ef0317..f2a7e9cfd5770f973f681b2ada1b7ed9eeea01cd 100644
--- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
+++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
@@ -241,7 +241,8 @@ static int init_maxfreq_mode(void)
 	if (!(cpupower_cpu_info.caps & CPUPOWER_CAP_INV_TSC))
 		goto use_sysfs;
 
-	if (cpupower_cpu_info.vendor == X86_VENDOR_AMD) {
+	if (cpupower_cpu_info.vendor == X86_VENDOR_AMD ||
+	    cpupower_cpu_info.vendor == X86_VENDOR_HYGON) {
 		/* MSR_AMD_HWCR tells us whether TSC runs at P0/mperf
 		 * freq.
 		 * A test whether hwcr is accessable/available would be: