Unverified commit 4cc3b010, authored by openeuler-ci-bot and committed by Gitee

!175 AMD: Add minimum support for AMD EPYC Genoa platform

Merge Pull Request from: @haochengxie 
 
This PR includes the minimum set of essential patches needed to enable the AMD EPYC Genoa platform.

Issue:
#I5NGRU — Enable AMD EPYC Genoa platform support for openEuler

Backported patches included:
crypto:
3438de03 5.15 crypto: ccp - Add support for new CCP/PSP device ID

hwmon:
2ade8fc6 5.14 x86/amd_nb: Add AMD family 19h model 50h PCI ids (front patch)
c8d0d3fa 5.12 hwmon: (k10temp) Zen3 Ryzen Desktop CPUs support (front patch)
02c9dce4 5.15 hwmon: (k10temp) support Zen3 APUs
128066c8 5.15 hwmon: (k10temp) Add additional missing Zen2 and Zen3 APUs
02a2484c 5.15 hwmon: (k10temp) Don't show Tdie for all Zen/Zen2/Zen3 CPU/APU
0e3f52bb 5.15 hwmon: (k10temp) Rework the temperature offset calculation
25572c81 5.15 hwmon: (k10temp) Add support for yellow carp
23c69b90 5.15 hwmon: (k10temp) Remove residues of current and voltage
f707bcb5 5.17 hwmon: (k10temp) Remove unused definitions
4fb0abfe 5.17 x86/amd_nb: Add AMD Family 19h Models (10h-1Fh) and (A0h-AFh) PCI IDs
3cf90efa 5.17 hwmon: (k10temp) Add support for AMD Family 19h Models 10h-1Fh and A0h-AFh
8bb050cd 5.17 hwmon: (k10temp) Support up to 12 CCDs on AMD Family of processors

EDAC:
f9571124 5.17 EDAC: Add RDDR5 and LRDDR5 memory types
e2be5955 5.17 EDAC/amd64: Add support for AMD Family 19h Models 10h-1Fh and A0h-AFh
75aeaaf2 5.18 EDAC/amd64: Set memory type per DIMM
2151c84e 5.18 EDAC/amd64: Add new register offset support and related changes

MCE:
94a311ce 5.14 x86/MCE/AMD, EDAC/mce_amd: Add new SMCA bank types
5176a93a 5.17 x86/MCE/AMD, EDAC/mce_amd: Add new SMCA bank types
f38ce910 5.16 x86/MCE/AMD: Export smca_get_bank_type symbol
0b746e8c 5.17 x86/MCE/AMD, EDAC/amd64: Move address translation to AMD64 EDAC
91f75eb4 5.17 x86/MCE/AMD, EDAC/mce_amd: Support non-uniform MCA bank type enumeration

KVM essential:
03ca4589 5.13 KVM: x86: Prevent KVM SVM from loading on kernels with 5-level paging
43e540cc 5.15 KVM: SVM: Add 5-level page table support for SVM
cb0f722a 5.15 KVM: x86/mmu: Support shadowing NPT when 5-level paging is enabled
39150352 5.17 KVM: x86: SVM: move avic definitions from AMD's spec to svm.h
4a204f78 5.18-rc1 KVM: SVM: Allow AVIC support on system w/ physical APIC ID > 255 
 
Link: https://gitee.com/openeuler/kernel/pulls/175 
Reviewed-by: Kevin Zhu <zhukeqian1@huawei.com> 
Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com> 
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com> 
......@@ -401,7 +401,8 @@ struct kvm_mmu {
u32 pkru_mask;
u64 *pae_root;
u64 *lm_root;
u64 *pml4_root;
u64 *pml5_root;
/*
* check zero bits on shadow page table entries, these
......
......@@ -301,7 +301,7 @@ extern void apei_mce_report_mem_error(int corrected,
/* These may be used by multiple smca_hwid_mcatypes */
enum smca_bank_types {
SMCA_LS = 0, /* Load Store */
SMCA_LS_V2, /* Load Store */
SMCA_LS_V2,
SMCA_IF, /* Instruction Fetch */
SMCA_L2_CACHE, /* L2 Cache */
SMCA_DE, /* Decoder Unit */
......@@ -310,36 +310,32 @@ enum smca_bank_types {
SMCA_FP, /* Floating Point */
SMCA_L3_CACHE, /* L3 Cache */
SMCA_CS, /* Coherent Slave */
SMCA_CS_V2, /* Coherent Slave */
SMCA_CS_V2,
SMCA_PIE, /* Power, Interrupts, etc. */
SMCA_UMC, /* Unified Memory Controller */
SMCA_UMC_V2,
SMCA_PB, /* Parameter Block */
SMCA_PSP, /* Platform Security Processor */
SMCA_PSP_V2, /* Platform Security Processor */
SMCA_PSP_V2,
SMCA_SMU, /* System Management Unit */
SMCA_SMU_V2, /* System Management Unit */
SMCA_SMU_V2,
SMCA_MP5, /* Microprocessor 5 Unit */
SMCA_MPDMA, /* MPDMA Unit */
SMCA_NBIO, /* Northbridge IO Unit */
SMCA_PCIE, /* PCI Express Unit */
SMCA_PCIE_V2,
SMCA_XGMI_PCS, /* xGMI PCS Unit */
SMCA_NBIF, /* NBIF Unit */
SMCA_SHUB, /* System HUB Unit */
SMCA_SATA, /* SATA Unit */
SMCA_USB, /* USB Unit */
SMCA_GMI_PCS, /* GMI PCS Unit */
SMCA_XGMI_PHY, /* xGMI PHY Unit */
SMCA_WAFL_PHY, /* WAFL PHY Unit */
SMCA_GMI_PHY, /* GMI PHY Unit */
N_SMCA_BANK_TYPES
};
#define HWID_MCATYPE(hwid, mcatype) (((hwid) << 16) | (mcatype))
struct smca_hwid {
unsigned int bank_type; /* Use with smca_bank_types for easy indexing. */
u32 hwid_mcatype; /* (hwid,mcatype) tuple */
u8 count; /* Number of instances. */
};
struct smca_bank {
struct smca_hwid *hwid;
u32 id; /* Value of MCA_IPID[InstanceId]. */
u8 sysfs_id; /* Value used for sysfs name. */
};
extern struct smca_bank smca_banks[MAX_NR_BANKS];
extern const char *smca_get_long_name(enum smca_bank_types t);
extern bool amd_mce_is_memory_error(struct mce *m);
......@@ -347,16 +343,13 @@ extern int mce_threshold_create_device(unsigned int cpu);
extern int mce_threshold_remove_device(unsigned int cpu);
void mce_amd_feature_init(struct cpuinfo_x86 *c);
int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr);
enum smca_bank_types smca_get_bank_type(unsigned int cpu, unsigned int bank);
#else
static inline int mce_threshold_create_device(unsigned int cpu) { return 0; };
static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; };
static inline bool amd_mce_is_memory_error(struct mce *m) { return false; };
static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
static inline int
umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; };
#endif
static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_amd_feature_init(c); }
......
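For illustration, the HWID_MCATYPE() key used above can be reproduced in a standalone sketch. The MCA_IPID value is hypothetical, not taken from real hardware; the bit positions follow the layout the kernel decodes (HardwareID in the low 12 bits of the high half, McaType in bits [31:16]):

/* Sketch: building and matching a (hwid, mcatype) key (illustrative values). */
#include <stdint.h>
#include <stdio.h>

#define HWID_MCATYPE(hwid, mcatype) (((hwid) << 16) | (mcatype))

int main(void)
{
	/* UMC v2 reports HardwareID 0x96 with McaType 0x1. */
	uint32_t key = HWID_MCATYPE(0x96, 0x1);		/* 0x00960001 */

	/* High half of a hypothetical MCA_IPID: McaType in bits [31:16],
	 * HardwareID in bits [11:0], as smca_configure() decodes it. */
	uint64_t ipid_hi = 0x00010096;
	uint32_t hwid_mcatype = HWID_MCATYPE(ipid_hi & 0xFFF, ipid_hi >> 16);

	printf("match: %d\n", key == hwid_mcatype);	/* prints "match: 1" */
	return 0;
}
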
......@@ -511,6 +511,7 @@
#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
#define MSR_AMD64_SVM_AVIC_DOORBELL 0xc001011b
#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
#define MSR_AMD64_SEV 0xc0010131
#define MSR_AMD64_SEV_ENABLED_BIT 0
......
......@@ -200,6 +200,42 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_NESTED_CTL_NP_ENABLE BIT(0)
#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
/* AVIC */
#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK GENMASK_ULL(11, 0)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFF)
#define AVIC_DOORBELL_PHYSICAL_ID_MASK (0xFF)
#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
enum avic_ipi_failure_cause {
AVIC_IPI_FAILURE_INVALID_INT_TYPE,
AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
AVIC_IPI_FAILURE_INVALID_TARGET,
AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
};
/*
* 0xff is broadcast, so the max index allowed for physical APIC ID
* table is 0xfe. APIC IDs above 0xff are reserved.
*/
#define AVIC_MAX_PHYSICAL_ID_COUNT 0xff
#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
struct vmcb_seg {
u16 selector;
u16 attrib;
......
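The widened host-physical-ID field is the heart of the AVIC change above: GENMASK_ULL(11, 0) gives 12 bits where the old mask gave 8, so host APIC IDs above 255 now fit in a physical ID table entry. A minimal user-space sketch of decoding one entry (the entry value is made up):

/* Sketch: decoding an AVIC physical APIC ID table entry (illustrative). */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define HOST_PHYSICAL_ID_MASK	GENMASK_ULL(11, 0)	/* was 0xFF before */
#define IS_RUNNING_MASK		(1ULL << 62)
#define VALID_MASK		(1ULL << 63)

int main(void)
{
	/* Hypothetical entry: valid, running, host APIC ID 0x123 (> 255). */
	uint64_t entry = VALID_MASK | IS_RUNNING_MASK | 0x123;

	printf("host APIC ID: %#llx, running: %d\n",
	       (unsigned long long)(entry & HOST_PHYSICAL_ID_MASK),
	       !!(entry & IS_RUNNING_MASK));
	return 0;
}
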
......@@ -19,12 +19,17 @@
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT 0x14a4
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4 0x14b1
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT 0x14b5
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4 0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);
......@@ -36,6 +41,8 @@ static const struct pci_device_id amd_root_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
{}
};
......@@ -57,6 +64,9 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
{}
};
......@@ -72,6 +82,9 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};
......
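amd_nb claims a node's Data Fabric functions only if their device IDs appear in these tables, which is why each new model needs entries in all three lists (root, F3 misc, F4 link). A minimal user-space sketch of the table-walk idea, with a single ID shown:

/* Sketch: pci_device_id-style table matching (illustrative). */
#include <stdint.h>
#include <stdio.h>

#define PCI_VENDOR_ID_AMD			0x1022
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F3	0x14b0

struct pci_id { uint16_t vendor, device; };

static const struct pci_id amd_nb_misc_ids[] = {
	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3 },
	{ 0, 0 }	/* terminator */
};

static int id_matches(uint16_t vendor, uint16_t device)
{
	for (const struct pci_id *id = amd_nb_misc_ids; id->vendor; id++)
		if (id->vendor == vendor && id->device == device)
			return 1;
	return 0;
}

int main(void)
{
	/* Genoa's DF function 3 is only found once its ID is in the table. */
	printf("%d\n", id_matches(PCI_VENDOR_ID_AMD, 0x14b0));
	return 0;
}
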
......@@ -71,33 +71,58 @@ static const char * const smca_umc_block_names[] = {
"misc_umc"
};
#define HWID_MCATYPE(hwid, mcatype) (((hwid) << 16) | (mcatype))
struct smca_hwid {
unsigned int bank_type; /* Use with smca_bank_types for easy indexing. */
u32 hwid_mcatype; /* (hwid,mcatype) tuple */
};
struct smca_bank {
const struct smca_hwid *hwid;
u32 id; /* Value of MCA_IPID[InstanceId]. */
u8 sysfs_id; /* Value used for sysfs name. */
};
static DEFINE_PER_CPU_READ_MOSTLY(struct smca_bank[MAX_NR_BANKS], smca_banks);
static DEFINE_PER_CPU_READ_MOSTLY(u8[N_SMCA_BANK_TYPES], smca_bank_counts);
struct smca_bank_name {
const char *name; /* Short name for sysfs */
const char *long_name; /* Long name for pretty-printing */
};
static struct smca_bank_name smca_names[] = {
[SMCA_LS] = { "load_store", "Load Store Unit" },
[SMCA_LS_V2] = { "load_store", "Load Store Unit" },
[SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" },
[SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" },
[SMCA_DE] = { "decode_unit", "Decode Unit" },
[SMCA_RESERVED] = { "reserved", "Reserved" },
[SMCA_EX] = { "execution_unit", "Execution Unit" },
[SMCA_FP] = { "floating_point", "Floating Point Unit" },
[SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" },
[SMCA_CS] = { "coherent_slave", "Coherent Slave" },
[SMCA_CS_V2] = { "coherent_slave", "Coherent Slave" },
[SMCA_PIE] = { "pie", "Power, Interrupts, etc." },
[SMCA_UMC] = { "umc", "Unified Memory Controller" },
[SMCA_PB] = { "param_block", "Parameter Block" },
[SMCA_PSP] = { "psp", "Platform Security Processor" },
[SMCA_PSP_V2] = { "psp", "Platform Security Processor" },
[SMCA_SMU] = { "smu", "System Management Unit" },
[SMCA_SMU_V2] = { "smu", "System Management Unit" },
[SMCA_MP5] = { "mp5", "Microprocessor 5 Unit" },
[SMCA_NBIO] = { "nbio", "Northbridge IO Unit" },
[SMCA_PCIE] = { "pcie", "PCI Express Unit" },
[SMCA_LS ... SMCA_LS_V2] = { "load_store", "Load Store Unit" },
[SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" },
[SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" },
[SMCA_DE] = { "decode_unit", "Decode Unit" },
[SMCA_RESERVED] = { "reserved", "Reserved" },
[SMCA_EX] = { "execution_unit", "Execution Unit" },
[SMCA_FP] = { "floating_point", "Floating Point Unit" },
[SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" },
[SMCA_CS ... SMCA_CS_V2] = { "coherent_slave", "Coherent Slave" },
[SMCA_PIE] = { "pie", "Power, Interrupts, etc." },
/* UMC v2 is separate because both of them can exist in a single system. */
[SMCA_UMC] = { "umc", "Unified Memory Controller" },
[SMCA_UMC_V2] = { "umc_v2", "Unified Memory Controller v2" },
[SMCA_PB] = { "param_block", "Parameter Block" },
[SMCA_PSP ... SMCA_PSP_V2] = { "psp", "Platform Security Processor" },
[SMCA_SMU ... SMCA_SMU_V2] = { "smu", "System Management Unit" },
[SMCA_MP5] = { "mp5", "Microprocessor 5 Unit" },
[SMCA_MPDMA] = { "mpdma", "MPDMA Unit" },
[SMCA_NBIO] = { "nbio", "Northbridge IO Unit" },
[SMCA_PCIE ... SMCA_PCIE_V2] = { "pcie", "PCI Express Unit" },
[SMCA_XGMI_PCS] = { "xgmi_pcs", "Ext Global Memory Interconnect PCS Unit" },
[SMCA_NBIF] = { "nbif", "NBIF Unit" },
[SMCA_SHUB] = { "shub", "System Hub Unit" },
[SMCA_SATA] = { "sata", "SATA Unit" },
[SMCA_USB] = { "usb", "USB Unit" },
[SMCA_GMI_PCS] = { "gmi_pcs", "Global Memory Interconnect PCS Unit" },
[SMCA_XGMI_PHY] = { "xgmi_phy", "Ext Global Memory Interconnect PHY Unit" },
[SMCA_WAFL_PHY] = { "wafl_phy", "WAFL PHY Unit" },
[SMCA_GMI_PHY] = { "gmi_phy", "Global Memory Interconnect PHY Unit" },
};
static const char *smca_get_name(enum smca_bank_types t)
......@@ -117,21 +142,22 @@ const char *smca_get_long_name(enum smca_bank_types t)
}
EXPORT_SYMBOL_GPL(smca_get_long_name);
static enum smca_bank_types smca_get_bank_type(unsigned int bank)
enum smca_bank_types smca_get_bank_type(unsigned int cpu, unsigned int bank)
{
struct smca_bank *b;
if (bank >= MAX_NR_BANKS)
return N_SMCA_BANK_TYPES;
b = &smca_banks[bank];
b = &per_cpu(smca_banks, cpu)[bank];
if (!b->hwid)
return N_SMCA_BANK_TYPES;
return b->hwid->bank_type;
}
EXPORT_SYMBOL_GPL(smca_get_bank_type);
static struct smca_hwid smca_hwid_mcatypes[] = {
static const struct smca_hwid smca_hwid_mcatypes[] = {
/* { bank_type, hwid_mcatype } */
/* Reserved type */
......@@ -155,6 +181,7 @@ static struct smca_hwid smca_hwid_mcatypes[] = {
/* Unified Memory Controller MCA type */
{ SMCA_UMC, HWID_MCATYPE(0x96, 0x0) },
{ SMCA_UMC_V2, HWID_MCATYPE(0x96, 0x1) },
/* Parameter Block MCA type */
{ SMCA_PB, HWID_MCATYPE(0x05, 0x0) },
......@@ -170,16 +197,27 @@ static struct smca_hwid smca_hwid_mcatypes[] = {
/* Microprocessor 5 Unit MCA type */
{ SMCA_MP5, HWID_MCATYPE(0x01, 0x2) },
/* MPDMA MCA type */
{ SMCA_MPDMA, HWID_MCATYPE(0x01, 0x3) },
/* Northbridge IO Unit MCA type */
{ SMCA_NBIO, HWID_MCATYPE(0x18, 0x0) },
/* PCI Express Unit MCA type */
{ SMCA_PCIE, HWID_MCATYPE(0x46, 0x0) },
{ SMCA_PCIE_V2, HWID_MCATYPE(0x46, 0x1) },
{ SMCA_XGMI_PCS, HWID_MCATYPE(0x50, 0x0) },
{ SMCA_NBIF, HWID_MCATYPE(0x6C, 0x0) },
{ SMCA_SHUB, HWID_MCATYPE(0x80, 0x0) },
{ SMCA_SATA, HWID_MCATYPE(0xA8, 0x0) },
{ SMCA_USB, HWID_MCATYPE(0xAA, 0x0) },
{ SMCA_GMI_PCS, HWID_MCATYPE(0x241, 0x0) },
{ SMCA_XGMI_PHY, HWID_MCATYPE(0x259, 0x0) },
{ SMCA_WAFL_PHY, HWID_MCATYPE(0x267, 0x0) },
{ SMCA_GMI_PHY, HWID_MCATYPE(0x269, 0x0) },
};
struct smca_bank smca_banks[MAX_NR_BANKS];
EXPORT_SYMBOL_GPL(smca_banks);
/*
* In SMCA enabled processors, we can have multiple banks for a given IP type.
* So to define a unique name for each bank, we use a temp c-string to append
......@@ -235,8 +273,9 @@ static void smca_set_misc_banks_map(unsigned int bank, unsigned int cpu)
static void smca_configure(unsigned int bank, unsigned int cpu)
{
u8 *bank_counts = this_cpu_ptr(smca_bank_counts);
const struct smca_hwid *s_hwid;
unsigned int i, hwid_mcatype;
struct smca_hwid *s_hwid;
u32 high, low;
u32 smca_config = MSR_AMD64_SMCA_MCx_CONFIG(bank);
......@@ -272,10 +311,6 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
smca_set_misc_banks_map(bank, cpu);
/* Return early if this bank was already initialized. */
if (smca_banks[bank].hwid && smca_banks[bank].hwid->hwid_mcatype != 0)
return;
if (rdmsr_safe(MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
return;
......@@ -286,10 +321,11 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
s_hwid = &smca_hwid_mcatypes[i];
if (hwid_mcatype == s_hwid->hwid_mcatype) {
smca_banks[bank].hwid = s_hwid;
smca_banks[bank].id = low;
smca_banks[bank].sysfs_id = s_hwid->count++;
this_cpu_ptr(smca_banks)[bank].hwid = s_hwid;
this_cpu_ptr(smca_banks)[bank].id = low;
this_cpu_ptr(smca_banks)[bank].sysfs_id = bank_counts[s_hwid->bank_type]++;
break;
}
}
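The rework above replaces the global smca_banks[] array with per-CPU state: each CPU counts how many banks of each type it has seen, and a bank's sysfs_id becomes its index within its own type on that CPU. A simplified user-space sketch of that counting (the bank layout is hypothetical):

/* Sketch: per-CPU, per-type sysfs_id assignment (illustrative). */
#include <stdint.h>
#include <stdio.h>

#define N_TYPES   3	/* stand-in for N_SMCA_BANK_TYPES */
#define NR_BANKS  4	/* stand-in for MAX_NR_BANKS */

int main(void)
{
	uint8_t bank_counts[N_TYPES] = { 0 };	/* smca_bank_counts for one CPU */
	int type_of[NR_BANKS] = { 0, 1, 1, 2 };	/* hypothetical bank layout */
	uint8_t sysfs_id[NR_BANKS];

	/* Two banks of type 1 get ids 0 and 1, e.g. "umc_0" and "umc_1". */
	for (int bank = 0; bank < NR_BANKS; bank++)
		sysfs_id[bank] = bank_counts[type_of[bank]]++;

	for (int bank = 0; bank < NR_BANKS; bank++)
		printf("bank %d: sysfs_id %u\n", bank, sysfs_id[bank]);
	return 0;
}
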
......@@ -575,7 +611,7 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
bool amd_filter_mce(struct mce *m)
{
enum smca_bank_types bank_type = smca_get_bank_type(m->bank);
enum smca_bank_types bank_type = smca_get_bank_type(m->extcpu, m->bank);
struct cpuinfo_x86 *c = &boot_cpu_data;
/* See Family 17h Models 10h-2Fh Erratum #1114. */
......@@ -613,7 +649,7 @@ static void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
} else if (c->x86 == 0x17 &&
(c->x86_model >= 0x10 && c->x86_model <= 0x2F)) {
if (smca_get_bank_type(bank) != SMCA_IF)
if (smca_get_bank_type(smp_processor_id(), bank) != SMCA_IF)
return;
msrs[0] = MSR_AMD64_SMCA_MCx_MISC(bank);
......@@ -675,213 +711,13 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
deferred_error_interrupt_enable(c);
}
int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
{
u64 dram_base_addr, dram_limit_addr, dram_hole_base;
/* We start from the normalized address */
u64 ret_addr = norm_addr;
u32 tmp;
u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
u8 intlv_addr_sel, intlv_addr_bit;
u8 num_intlv_bits, hashed_bit;
u8 lgcy_mmio_hole_en, base = 0;
u8 cs_mask, cs_id = 0;
bool hash_enabled = false;
/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp))
goto out_err;
/* Remove HiAddrOffset from normalized address, if enabled: */
if (tmp & BIT(0)) {
u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8;
if (norm_addr >= hi_addr_offset) {
ret_addr -= hi_addr_offset;
base = 1;
}
}
/* Read D18F0x110 (DramBaseAddress). */
if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp))
goto out_err;
/* Check if address range is valid. */
if (!(tmp & BIT(0))) {
pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
__func__, tmp);
goto out_err;
}
lgcy_mmio_hole_en = tmp & BIT(1);
intlv_num_chan = (tmp >> 4) & 0xF;
intlv_addr_sel = (tmp >> 8) & 0x7;
dram_base_addr = (tmp & GENMASK_ULL(31, 12)) << 16;
/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
if (intlv_addr_sel > 3) {
pr_err("%s: Invalid interleave address select %d.\n",
__func__, intlv_addr_sel);
goto out_err;
}
/* Read D18F0x114 (DramLimitAddress). */
if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp))
goto out_err;
intlv_num_sockets = (tmp >> 8) & 0x1;
intlv_num_dies = (tmp >> 10) & 0x3;
dram_limit_addr = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);
intlv_addr_bit = intlv_addr_sel + 8;
/* Re-use intlv_num_chan by setting it equal to log2(#channels) */
switch (intlv_num_chan) {
case 0: intlv_num_chan = 0; break;
case 1: intlv_num_chan = 1; break;
case 3: intlv_num_chan = 2; break;
case 5: intlv_num_chan = 3; break;
case 7: intlv_num_chan = 4; break;
case 8: intlv_num_chan = 1;
hash_enabled = true;
break;
default:
pr_err("%s: Invalid number of interleaved channels %d.\n",
__func__, intlv_num_chan);
goto out_err;
}
num_intlv_bits = intlv_num_chan;
if (intlv_num_dies > 2) {
pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
__func__, intlv_num_dies);
goto out_err;
}
num_intlv_bits += intlv_num_dies;
/* Add a bit if sockets are interleaved. */
num_intlv_bits += intlv_num_sockets;
/* Assert num_intlv_bits <= 4 */
if (num_intlv_bits > 4) {
pr_err("%s: Invalid interleave bits %d.\n",
__func__, num_intlv_bits);
goto out_err;
}
if (num_intlv_bits > 0) {
u64 temp_addr_x, temp_addr_i, temp_addr_y;
u8 die_id_bit, sock_id_bit, cs_fabric_id;
/*
* Read FabricBlockInstanceInformation3_CS[BlockFabricID].
* This is the fabric id for this coherent slave. Use
* umc/channel# as instance id of the coherent slave
* for FICAA.
*/
if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp))
goto out_err;
cs_fabric_id = (tmp >> 8) & 0xFF;
die_id_bit = 0;
/* If interleaved over more than 1 channel: */
if (intlv_num_chan) {
die_id_bit = intlv_num_chan;
cs_mask = (1 << die_id_bit) - 1;
cs_id = cs_fabric_id & cs_mask;
}
sock_id_bit = die_id_bit;
/* Read D18F1x208 (SystemFabricIdMask). */
if (intlv_num_dies || intlv_num_sockets)
if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp))
goto out_err;
/* If interleaved over more than 1 die. */
if (intlv_num_dies) {
sock_id_bit = die_id_bit + intlv_num_dies;
die_id_shift = (tmp >> 24) & 0xF;
die_id_mask = (tmp >> 8) & 0xFF;
cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
}
/* If interleaved over more than 1 socket. */
if (intlv_num_sockets) {
socket_id_shift = (tmp >> 28) & 0xF;
socket_id_mask = (tmp >> 16) & 0xFF;
cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
}
/*
* The pre-interleaved address consists of XXXXXXIIIYYYYY
* where III is the ID for this CS, and XXXXXXYYYYY are the
* address bits from the post-interleaved address.
* "num_intlv_bits" has been calculated to tell us how many "I"
* bits there are. "intlv_addr_bit" tells us how many "Y" bits
* there are (where "I" starts).
*/
temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0);
temp_addr_i = (cs_id << intlv_addr_bit);
temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
ret_addr = temp_addr_x | temp_addr_i | temp_addr_y;
}
/* Add dram base address */
ret_addr += dram_base_addr;
/* If legacy MMIO hole enabled */
if (lgcy_mmio_hole_en) {
if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp))
goto out_err;
dram_hole_base = tmp & GENMASK(31, 24);
if (ret_addr >= dram_hole_base)
ret_addr += (BIT_ULL(32) - dram_hole_base);
}
if (hash_enabled) {
/* Save some parentheses and grab ls-bit at the end. */
hashed_bit = (ret_addr >> 12) ^
(ret_addr >> 18) ^
(ret_addr >> 21) ^
(ret_addr >> 30) ^
cs_id;
hashed_bit &= BIT(0);
if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0)))
ret_addr ^= BIT(intlv_addr_bit);
}
/* Is the calculated system address above the DRAM limit address? */
if (ret_addr > dram_limit_addr)
goto out_err;
*sys_addr = ret_addr;
return 0;
out_err:
return -EINVAL;
}
EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);
bool amd_mce_is_memory_error(struct mce *m)
{
/* ErrCodeExt[20:16] */
u8 xec = (m->status >> 16) & 0x1f;
if (mce_flags.smca)
return smca_get_bank_type(m->bank) == SMCA_UMC && xec == 0x0;
return smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC && xec == 0x0;
return m->bank == 4 && xec == 0x8;
}
......@@ -1197,7 +1033,7 @@ static struct kobj_type threshold_ktype = {
.release = threshold_block_release,
};
static const char *get_name(unsigned int bank, struct threshold_block *b)
static const char *get_name(unsigned int cpu, unsigned int bank, struct threshold_block *b)
{
enum smca_bank_types bank_type;
......@@ -1208,7 +1044,7 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
return th_names[bank];
}
bank_type = smca_get_bank_type(bank);
bank_type = smca_get_bank_type(cpu, bank);
if (bank_type >= N_SMCA_BANK_TYPES)
return NULL;
......@@ -1218,12 +1054,12 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
return NULL;
}
if (smca_banks[bank].hwid->count == 1)
if (per_cpu(smca_bank_counts, cpu)[bank_type] == 1)
return smca_get_name(bank_type);
snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
"%s_%x", smca_get_name(bank_type),
smca_banks[bank].sysfs_id);
"%s_%u", smca_get_name(bank_type),
per_cpu(smca_banks, cpu)[bank].sysfs_id);
return buf_mcatype;
}
......@@ -1279,7 +1115,7 @@ static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb
else
tb->blocks = b;
err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(cpu, bank, b));
if (err)
goto out_free;
recurse:
......@@ -1334,7 +1170,7 @@ static int threshold_create_bank(struct threshold_bank **bp, unsigned int cpu,
struct device *dev = this_cpu_read(mce_device);
struct amd_northbridge *nb = NULL;
struct threshold_bank *b = NULL;
const char *name = get_name(bank, NULL);
const char *name = get_name(cpu, bank, NULL);
int err = 0;
if (!dev)
......
......@@ -3305,13 +3305,13 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
* the shadow page table may be a PAE or a long mode page table.
*/
pm_mask = PT_PRESENT_MASK;
if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
/*
* Allocate the page for the PDPTEs when shadowing 32-bit NPT
* with 64-bit only when needed. Unlike 32-bit NPT, it doesn't
* need to be in low mem. See also lm_root below.
* need to be in low mem. See also pml4_root below.
*/
if (!vcpu->arch.mmu->pae_root) {
WARN_ON_ONCE(!tdp_enabled);
......@@ -3351,20 +3351,47 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
* handled above (to share logic with PAE), deal with the PML4 here.
*/
if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
if (vcpu->arch.mmu->lm_root == NULL) {
u64 *lm_root;
if (vcpu->arch.mmu->pml4_root == NULL) {
u64 *pml4_root;
pml4_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (!pml4_root)
return -ENOMEM;
pml4_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
vcpu->arch.mmu->pml4_root = pml4_root;
}
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pml4_root);
}
#ifdef CONFIG_X86_64
if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
if (vcpu->arch.mmu->pml4_root == NULL) {
u64 *pml4_root;
lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (!lm_root)
pml4_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (!pml4_root)
return -ENOMEM;
lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
pml4_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
vcpu->arch.mmu->lm_root = lm_root;
vcpu->arch.mmu->pml4_root = pml4_root;
}
if (vcpu->arch.mmu->pml5_root == NULL) {
u64 *pml5_root;
pml5_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (!pml5_root)
return -ENOMEM;
pml5_root[0] = __pa(vcpu->arch.mmu->pml4_root) | pm_mask;
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
vcpu->arch.mmu->pml5_root = pml5_root;
}
vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pml5_root);
}
#endif
set_root_pgd:
vcpu->arch.mmu->root_pgd = root_pgd;
......@@ -5302,7 +5329,10 @@ slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
static void free_mmu_pages(struct kvm_mmu *mmu)
{
free_page((unsigned long)mmu->pae_root);
free_page((unsigned long)mmu->lm_root);
free_page((unsigned long)mmu->pml4_root);
#ifdef CONFIG_X86_64
free_page((unsigned long)mmu->pml5_root);
#endif
}
static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
......
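The net effect of the mmu.c changes: when NPT uses 5-level paging, a one-entry PML5 page is stacked on top of the existing special PML4 page, and root_hpa points at whichever page matches shadow_root_level. A rough user-space sketch of the chaining — the addresses are made up and pm_mask is reduced to the present bit:

/* Sketch: special-root chaining for 4- vs 5-level shadow NPT (illustrative). */
#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK (1ULL << 0)

int main(void)
{
	uint64_t pae_root_pa  = 0x1000;	/* hypothetical __pa() values */
	uint64_t pml4_root_pa = 0x2000;
	uint64_t pml5_root_pa = 0x3000;

	/* pml4_root[0] points at pae_root; pml5_root[0] at pml4_root. */
	uint64_t pml4_entry0 = pae_root_pa  | PT_PRESENT_MASK;
	uint64_t pml5_entry0 = pml4_root_pa | PT_PRESENT_MASK;

	int shadow_root_level = 5;	/* PT64_ROOT_5LEVEL */

	/* root_hpa: the PML5 page for 5-level, the PML4 page for 4-level. */
	uint64_t root_hpa = (shadow_root_level == 5) ? pml5_root_pa
						     : pml4_root_pa;

	printf("pml5[0]=%#llx pml4[0]=%#llx root=%#llx\n",
	       (unsigned long long)pml5_entry0,
	       (unsigned long long)pml4_entry0,
	       (unsigned long long)root_hpa);
	return 0;
}
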
......@@ -33,20 +33,6 @@ int avic;
module_param(avic, int, S_IRUGO);
#endif
#define SVM_AVIC_DOORBELL 0xc001011b
#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
/*
* 0xff is broadcast, so the max index allowed for physical APIC ID
* table is 0xfe. APIC IDs above 0xff are reserved.
*/
#define AVIC_MAX_PHYSICAL_ID_COUNT 255
#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS 8
#define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1)
......@@ -79,12 +65,6 @@ struct amd_svm_iommu_ir {
void *data; /* Storing pointer to struct amd_ir_data */
};
enum avic_ipi_failure_cause {
AVIC_IPI_FAILURE_INVALID_INT_TYPE,
AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
AVIC_IPI_FAILURE_INVALID_TARGET,
AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
};
/* Note:
* This function is called from IOMMU driver to notify
......@@ -683,7 +663,7 @@ int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
int cpuid = vcpu->cpu;
if (cpuid != get_cpu())
wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpuid));
wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpuid));
put_cpu();
} else
kvm_vcpu_wake_up(vcpu);
......@@ -958,18 +938,13 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
u64 entry;
/* ID = 0xff (broadcast), ID > 0xff (reserved) */
int h_physical_id = kvm_cpu_get_apicid(cpu);
struct vcpu_svm *svm = to_svm(vcpu);
if (!kvm_vcpu_apicv_active(vcpu))
return;
/*
* Since the host physical APIC id is 8 bits,
* we can support host APIC ID upto 255.
*/
if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
if (WARN_ON(h_physical_id & ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
return;
entry = READ_ONCE(*(svm->avic_physical_id_cache));
......
......@@ -257,7 +257,7 @@ static inline void invlpga(unsigned long addr, u32 asid)
static int get_max_npt_level(void)
{
#ifdef CONFIG_X86_64
return PT64_ROOT_4LEVEL;
return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
#else
return PT32E_ROOT_LEVEL;
#endif
......
......@@ -410,17 +410,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
/* avic.c */
#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
extern int avic;
static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
......@@ -440,6 +429,17 @@ static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}
#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK GENMASK_ULL(11, 0)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
......
......@@ -360,6 +360,12 @@ static const struct sp_dev_vdata dev_vdata[] = {
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
.psp_vdata = &pspv3,
#endif
},
{ /* 5 */
.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
.psp_vdata = &pspv2,
#endif
},
};
......@@ -369,6 +375,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
/* Last entry must be zero */
{ 0, }
};
......
......@@ -15,6 +15,21 @@ static struct msr __percpu *msrs;
static struct amd64_family_type *fam_type;
static inline u32 get_umc_reg(u32 reg)
{
if (!fam_type->flags.zn_regs_v2)
return reg;
switch (reg) {
case UMCCH_ADDR_CFG: return UMCCH_ADDR_CFG_DDR5;
case UMCCH_ADDR_MASK_SEC: return UMCCH_ADDR_MASK_SEC_DDR5;
case UMCCH_DIMM_CFG: return UMCCH_DIMM_CFG_DDR5;
}
WARN_ONCE(1, "%s: unknown register 0x%x", __func__, reg);
return 0;
}
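Call sites never hard-code the DDR5 offsets: everything funnels through get_umc_reg(), which returns the legacy offset unless the family sets flags.zn_regs_v2. The same remapping as a self-contained sketch:

/* Sketch: zn_regs_v2 UMC register offset remapping (mirrors get_umc_reg()). */
#include <stdint.h>
#include <stdio.h>

#define UMCCH_ADDR_MASK_SEC		0x28
#define UMCCH_ADDR_MASK_SEC_DDR5	0x30
#define UMCCH_ADDR_CFG			0x30
#define UMCCH_ADDR_CFG_DDR5		0x40
#define UMCCH_DIMM_CFG			0x80
#define UMCCH_DIMM_CFG_DDR5		0x90

static uint32_t get_umc_reg(int zn_regs_v2, uint32_t reg)
{
	if (!zn_regs_v2)
		return reg;
	switch (reg) {
	case UMCCH_ADDR_CFG:		return UMCCH_ADDR_CFG_DDR5;
	case UMCCH_ADDR_MASK_SEC:	return UMCCH_ADDR_MASK_SEC_DDR5;
	case UMCCH_DIMM_CFG:		return UMCCH_DIMM_CFG_DDR5;
	}
	return 0;	/* unknown register */
}

int main(void)
{
	/* Genoa (Family 19h Model 10h) sets zn_regs_v2; older parts don't. */
	printf("DIMM cfg: old %#x, v2 %#x\n",
	       get_umc_reg(0, UMCCH_DIMM_CFG), get_umc_reg(1, UMCCH_DIMM_CFG));
	return 0;
}
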
/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
......@@ -709,6 +724,205 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
return csrow;
}
static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
{
u64 dram_base_addr, dram_limit_addr, dram_hole_base;
/* We start from the normalized address */
u64 ret_addr = norm_addr;
u32 tmp;
u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
u8 intlv_addr_sel, intlv_addr_bit;
u8 num_intlv_bits, hashed_bit;
u8 lgcy_mmio_hole_en, base = 0;
u8 cs_mask, cs_id = 0;
bool hash_enabled = false;
/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp))
goto out_err;
/* Remove HiAddrOffset from normalized address, if enabled: */
if (tmp & BIT(0)) {
u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8;
if (norm_addr >= hi_addr_offset) {
ret_addr -= hi_addr_offset;
base = 1;
}
}
/* Read D18F0x110 (DramBaseAddress). */
if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp))
goto out_err;
/* Check if address range is valid. */
if (!(tmp & BIT(0))) {
pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
__func__, tmp);
goto out_err;
}
lgcy_mmio_hole_en = tmp & BIT(1);
intlv_num_chan = (tmp >> 4) & 0xF;
intlv_addr_sel = (tmp >> 8) & 0x7;
dram_base_addr = (tmp & GENMASK_ULL(31, 12)) << 16;
/* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
if (intlv_addr_sel > 3) {
pr_err("%s: Invalid interleave address select %d.\n",
__func__, intlv_addr_sel);
goto out_err;
}
/* Read D18F0x114 (DramLimitAddress). */
if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp))
goto out_err;
intlv_num_sockets = (tmp >> 8) & 0x1;
intlv_num_dies = (tmp >> 10) & 0x3;
dram_limit_addr = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);
intlv_addr_bit = intlv_addr_sel + 8;
/* Re-use intlv_num_chan by setting it equal to log2(#channels) */
switch (intlv_num_chan) {
case 0: intlv_num_chan = 0; break;
case 1: intlv_num_chan = 1; break;
case 3: intlv_num_chan = 2; break;
case 5: intlv_num_chan = 3; break;
case 7: intlv_num_chan = 4; break;
case 8: intlv_num_chan = 1;
hash_enabled = true;
break;
default:
pr_err("%s: Invalid number of interleaved channels %d.\n",
__func__, intlv_num_chan);
goto out_err;
}
num_intlv_bits = intlv_num_chan;
if (intlv_num_dies > 2) {
pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
__func__, intlv_num_dies);
goto out_err;
}
num_intlv_bits += intlv_num_dies;
/* Add a bit if sockets are interleaved. */
num_intlv_bits += intlv_num_sockets;
/* Assert num_intlv_bits <= 4 */
if (num_intlv_bits > 4) {
pr_err("%s: Invalid interleave bits %d.\n",
__func__, num_intlv_bits);
goto out_err;
}
if (num_intlv_bits > 0) {
u64 temp_addr_x, temp_addr_i, temp_addr_y;
u8 die_id_bit, sock_id_bit, cs_fabric_id;
/*
* Read FabricBlockInstanceInformation3_CS[BlockFabricID].
* This is the fabric id for this coherent slave. Use
* umc/channel# as instance id of the coherent slave
* for FICAA.
*/
if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp))
goto out_err;
cs_fabric_id = (tmp >> 8) & 0xFF;
die_id_bit = 0;
/* If interleaved over more than 1 channel: */
if (intlv_num_chan) {
die_id_bit = intlv_num_chan;
cs_mask = (1 << die_id_bit) - 1;
cs_id = cs_fabric_id & cs_mask;
}
sock_id_bit = die_id_bit;
/* Read D18F1x208 (SystemFabricIdMask). */
if (intlv_num_dies || intlv_num_sockets)
if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp))
goto out_err;
/* If interleaved over more than 1 die. */
if (intlv_num_dies) {
sock_id_bit = die_id_bit + intlv_num_dies;
die_id_shift = (tmp >> 24) & 0xF;
die_id_mask = (tmp >> 8) & 0xFF;
cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
}
/* If interleaved over more than 1 socket. */
if (intlv_num_sockets) {
socket_id_shift = (tmp >> 28) & 0xF;
socket_id_mask = (tmp >> 16) & 0xFF;
cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
}
/*
* The pre-interleaved address consists of XXXXXXIIIYYYYY
* where III is the ID for this CS, and XXXXXXYYYYY are the
* address bits from the post-interleaved address.
* "num_intlv_bits" has been calculated to tell us how many "I"
* bits there are. "intlv_addr_bit" tells us how many "Y" bits
* there are (where "I" starts).
*/
temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0);
temp_addr_i = (cs_id << intlv_addr_bit);
temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
ret_addr = temp_addr_x | temp_addr_i | temp_addr_y;
}
/* Add dram base address */
ret_addr += dram_base_addr;
/* If legacy MMIO hole enabled */
if (lgcy_mmio_hole_en) {
if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp))
goto out_err;
dram_hole_base = tmp & GENMASK(31, 24);
if (ret_addr >= dram_hole_base)
ret_addr += (BIT_ULL(32) - dram_hole_base);
}
if (hash_enabled) {
/* Save some parentheses and grab ls-bit at the end. */
hashed_bit = (ret_addr >> 12) ^
(ret_addr >> 18) ^
(ret_addr >> 21) ^
(ret_addr >> 30) ^
cs_id;
hashed_bit &= BIT(0);
if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0)))
ret_addr ^= BIT(intlv_addr_bit);
}
/* Is the calculated system address above the DRAM limit address? */
if (ret_addr > dram_limit_addr)
goto out_err;
*sys_addr = ret_addr;
return 0;
out_err:
return -EINVAL;
}
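The interleave-stitching step in the middle of umc_normaddr_to_sysaddr() is easiest to see with concrete numbers; the values below are invented for illustration:

/* Sketch: splicing the CS id into the normalized address (made-up values). */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t ret_addr = 0x12345;	/* hypothetical normalized address */
	uint8_t intlv_addr_bit = 8;	/* intlv_addr_sel 0 -> address bit 8 */
	uint8_t num_intlv_bits = 2;	/* e.g. 4-channel interleave */
	uint8_t cs_id = 0x2;		/* this coherent slave's id */

	/* Address form XXXXXX|III|YYYYY: keep the low Y bits, insert the
	 * cs_id as the I bits, and shift the remaining X bits up. */
	uint64_t y = ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0);
	uint64_t i = (uint64_t)cs_id << intlv_addr_bit;
	uint64_t x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;

	printf("pre-interleave address: %#llx\n", (unsigned long long)(x | i | y));
	return 0;
}
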
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
/*
......@@ -875,8 +1089,10 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt)
edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
if (pvt->dram_type == MEM_LRDDR4) {
amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) {
amd_smn_read(pvt->mc_node_id,
umc_base + get_umc_reg(UMCCH_ADDR_CFG),
&tmp);
edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
i, 1 << ((tmp >> 4) & 0x3));
}
......@@ -951,7 +1167,7 @@ static void prep_chip_selects(struct amd64_pvt *pvt)
for_each_umc(umc) {
pvt->csels[umc].b_cnt = 4;
pvt->csels[umc].m_cnt = 2;
pvt->csels[umc].m_cnt = fam_type->flags.zn_regs_v2 ? 4 : 2;
}
} else {
......@@ -991,7 +1207,7 @@ static void read_umc_base_mask(struct amd64_pvt *pvt)
}
umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(UMCCH_ADDR_MASK_SEC);
for_each_chip_select_mask(cs, umc, pvt) {
mask = &pvt->csels[umc].csmasks[cs];
......@@ -1062,19 +1278,49 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
}
}
static void determine_memory_type_df(struct amd64_pvt *pvt)
{
struct amd64_umc *umc;
u32 i;
for_each_umc(i) {
umc = &pvt->umc[i];
if (!(umc->sdp_ctrl & UMC_SDP_INIT)) {
umc->dram_type = MEM_EMPTY;
continue;
}
/*
* Check if the system supports the "DDR Type" field in UMC Config
* and has DDR5 DIMMs in use.
*/
if (fam_type->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
if (umc->dimm_cfg & BIT(5))
umc->dram_type = MEM_LRDDR5;
else if (umc->dimm_cfg & BIT(4))
umc->dram_type = MEM_RDDR5;
else
umc->dram_type = MEM_DDR5;
} else {
if (umc->dimm_cfg & BIT(5))
umc->dram_type = MEM_LRDDR4;
else if (umc->dimm_cfg & BIT(4))
umc->dram_type = MEM_RDDR4;
else
umc->dram_type = MEM_DDR4;
}
edac_dbg(1, " UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]);
}
}
static void determine_memory_type(struct amd64_pvt *pvt)
{
u32 dram_ctrl, dcsm;
if (pvt->umc) {
if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
pvt->dram_type = MEM_LRDDR4;
else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
pvt->dram_type = MEM_RDDR4;
else
pvt->dram_type = MEM_DDR4;
return;
}
if (pvt->umc)
return determine_memory_type_df(pvt);
switch (pvt->fam) {
case 0xf:
......@@ -1595,6 +1841,7 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
{
u32 addr_mask_orig, addr_mask_deinterleaved;
u32 msb, weight, num_zero_bits;
int cs_mask_nr = csrow_nr;
int dimm, size = 0;
/* No Chip Selects are enabled. */
......@@ -1610,17 +1857,33 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
return size;
/*
* There is one mask per DIMM, and two Chip Selects per DIMM.
* CS0 and CS1 -> DIMM0
* CS2 and CS3 -> DIMM1
* Family 17h introduced systems with one mask per DIMM,
* and two Chip Selects per DIMM.
*
* CS0 and CS1 -> MASK0 / DIMM0
* CS2 and CS3 -> MASK1 / DIMM1
*
* Family 19h Model 10h introduced systems with one mask per Chip Select,
* and two Chip Selects per DIMM.
*
* CS0 -> MASK0 -> DIMM0
* CS1 -> MASK1 -> DIMM0
* CS2 -> MASK2 -> DIMM1
* CS3 -> MASK3 -> DIMM1
*
* Keep the mask number equal to the Chip Select number for newer systems,
* and shift the mask number for older systems.
*/
dimm = csrow_nr >> 1;
if (!fam_type->flags.zn_regs_v2)
cs_mask_nr >>= 1;
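	/*
	 * Illustrative mapping: for csrow_nr = 3, zn_regs_v2 systems use
	 * mask 3 (one mask per chip select), while older systems shift
	 * down and use mask 1 (one mask shared by both chip selects of
	 * DIMM1).
	 */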
/* Asymmetric dual-rank DIMM support. */
if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
else
addr_mask_orig = pvt->csels[umc].csmasks[dimm];
addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];
/*
* The number of zero bits in the mask is equal to the number of bits
......@@ -2371,6 +2634,17 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F19_M10H_CPUS] = {
.ctl_name = "F19h_M10h",
.f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6,
.max_mcs = 12,
.flags.zn_regs_v2 = 1,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
};
/*
......@@ -2802,7 +3076,7 @@ static void __read_mc_regs_df(struct amd64_pvt *pvt)
umc_base = get_umc_base(i);
umc = &pvt->umc[i];
amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
amd_smn_read(nid, umc_base + get_umc_reg(UMCCH_DIMM_CFG), &umc->dimm_cfg);
amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
......@@ -2886,7 +3160,9 @@ static void read_mc_regs(struct amd64_pvt *pvt)
read_dct_base_mask(pvt);
determine_memory_type(pvt);
edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
if (!pvt->umc)
edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
determine_ecc_sym_sz(pvt);
}
......@@ -2982,7 +3258,7 @@ static int init_csrows_df(struct mem_ctl_info *mci)
pvt->mc_node_id, cs);
dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
dimm->mtype = pvt->dram_type;
dimm->mtype = pvt->umc[umc].dram_type;
dimm->edac_mode = edac_mode;
dimm->dtype = dev_type;
dimm->grain = 64;
......@@ -3417,11 +3693,20 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
break;
case 0x19:
if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
if (pvt->model >= 0x10 && pvt->model <= 0x1f) {
fam_type = &family_types[F19_M10H_CPUS];
pvt->ops = &family_types[F19_M10H_CPUS].ops;
break;
} else if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
fam_type = &family_types[F17_M70H_CPUS];
pvt->ops = &family_types[F17_M70H_CPUS].ops;
fam_type->ctl_name = "F19h_M20h";
break;
} else if (pvt->model >= 0xa0 && pvt->model <= 0xaf) {
fam_type = &family_types[F19_M10H_CPUS];
pvt->ops = &family_types[F19_M10H_CPUS].ops;
fam_type->ctl_name = "F19h_MA0h";
break;
}
fam_type = &family_types[F19_CPUS];
pvt->ops = &family_types[F19_CPUS].ops;
......
......@@ -96,7 +96,7 @@
/* Hardware limit on ChipSelect rows per MC and processors per system */
#define NUM_CHIPSELECTS 8
#define DRAM_RANGES 8
#define NUM_CONTROLLERS 8
#define NUM_CONTROLLERS 12
#define ON true
#define OFF false
......@@ -126,6 +126,8 @@
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650
#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F0 0x14ad
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F6 0x14b3
/*
* Function 1 - Address Map
......@@ -269,8 +271,11 @@
#define UMCCH_BASE_ADDR_SEC 0x10
#define UMCCH_ADDR_MASK 0x20
#define UMCCH_ADDR_MASK_SEC 0x28
#define UMCCH_ADDR_MASK_SEC_DDR5 0x30
#define UMCCH_ADDR_CFG 0x30
#define UMCCH_ADDR_CFG_DDR5 0x40
#define UMCCH_DIMM_CFG 0x80
#define UMCCH_DIMM_CFG_DDR5 0x90
#define UMCCH_UMC_CFG 0x100
#define UMCCH_SDP_CTRL 0x104
#define UMCCH_ECC_CTRL 0x14C
......@@ -298,6 +303,7 @@ enum amd_families {
F17_M60H_CPUS,
F17_M70H_CPUS,
F19_CPUS,
F19_M10H_CPUS,
NUM_FAMILIES,
};
......@@ -338,6 +344,9 @@ struct amd64_umc {
u32 sdp_ctrl; /* SDP Control reg */
u32 ecc_ctrl; /* DRAM ECC Control reg */
u32 umc_cap_hi; /* Capabilities High reg */
/* cache the dram_type */
enum mem_type dram_type;
};
struct amd64_pvt {
......@@ -385,7 +394,12 @@ struct amd64_pvt {
/* place to store error injection parameters prior to issue */
struct error_injection injection;
/* cache the dram_type */
/*
* cache the dram_type
*
* NOTE: Don't use this for Family 17h and later.
* Use dram_type in struct amd64_umc instead.
*/
enum mem_type dram_type;
struct amd64_umc *umc; /* UMC registers */
......@@ -482,11 +496,22 @@ struct low_ops {
unsigned cs_mode, int cs_mask_nr);
};
struct amd64_family_flags {
/*
* Indicates that the system supports the new register offsets, etc.
* first introduced with Family 19h Model 10h.
*/
__u64 zn_regs_v2 : 1,
__reserved : 63;
};
struct amd64_family_type {
const char *ctl_name;
u16 f0_id, f1_id, f2_id, f6_id;
/* Maximum number of memory controllers per die/node. */
u8 max_mcs;
struct amd64_family_flags flags;
struct low_ops ops;
};
......
......@@ -162,6 +162,8 @@ const char * const edac_mem_types[] = {
[MEM_RDDR4] = "Registered-DDR4",
[MEM_LRDDR4] = "Load-Reduced-DDR4-RAM",
[MEM_DDR5] = "Unbuffered-DDR5",
[MEM_RDDR5] = "Registered-DDR5",
[MEM_LRDDR5] = "Load-Reduced-DDR5-RAM",
[MEM_NVDIMM] = "Non-volatile-RAM",
[MEM_HBM2] = "High-bandwidth-memory-Gen2",
};
......
......@@ -323,6 +323,21 @@ static const char * const smca_umc_mce_desc[] = {
"AES SRAM ECC error",
};
static const char * const smca_umc2_mce_desc[] = {
"DRAM ECC error",
"Data poison error",
"SDP parity error",
"Reserved",
"Address/Command parity error",
"Write data parity error",
"DCQ SRAM ECC error",
"Reserved",
"Read data parity error",
"Rdb SRAM ECC error",
"RdRsp SRAM ECC error",
"LM32 MP errors",
};
static const char * const smca_pb_mce_desc[] = {
"An ECC error in the Parameter Block RAM array",
};
......@@ -384,6 +399,63 @@ static const char * const smca_mp5_mce_desc[] = {
"Instruction Tag Cache Bank B ECC or parity error",
};
static const char * const smca_mpdma_mce_desc[] = {
"Main SRAM [31:0] bank ECC or parity error",
"Main SRAM [63:32] bank ECC or parity error",
"Main SRAM [95:64] bank ECC or parity error",
"Main SRAM [127:96] bank ECC or parity error",
"Data Cache Bank A ECC or parity error",
"Data Cache Bank B ECC or parity error",
"Data Tag Cache Bank A ECC or parity error",
"Data Tag Cache Bank B ECC or parity error",
"Instruction Cache Bank A ECC or parity error",
"Instruction Cache Bank B ECC or parity error",
"Instruction Tag Cache Bank A ECC or parity error",
"Instruction Tag Cache Bank B ECC or parity error",
"Data Cache Bank A ECC or parity error",
"Data Cache Bank B ECC or parity error",
"Data Tag Cache Bank A ECC or parity error",
"Data Tag Cache Bank B ECC or parity error",
"Instruction Cache Bank A ECC or parity error",
"Instruction Cache Bank B ECC or parity error",
"Instruction Tag Cache Bank A ECC or parity error",
"Instruction Tag Cache Bank B ECC or parity error",
"Data Cache Bank A ECC or parity error",
"Data Cache Bank B ECC or parity error",
"Data Tag Cache Bank A ECC or parity error",
"Data Tag Cache Bank B ECC or parity error",
"Instruction Cache Bank A ECC or parity error",
"Instruction Cache Bank B ECC or parity error",
"Instruction Tag Cache Bank A ECC or parity error",
"Instruction Tag Cache Bank B ECC or parity error",
"System Hub Read Buffer ECC or parity error",
"MPDMA TVF DVSEC Memory ECC or parity error",
"MPDMA TVF MMIO Mailbox0 ECC or parity error",
"MPDMA TVF MMIO Mailbox1 ECC or parity error",
"MPDMA TVF Doorbell Memory ECC or parity error",
"MPDMA TVF SDP Slave Memory 0 ECC or parity error",
"MPDMA TVF SDP Slave Memory 1 ECC or parity error",
"MPDMA TVF SDP Slave Memory 2 ECC or parity error",
"MPDMA TVF SDP Master Memory 0 ECC or parity error",
"MPDMA TVF SDP Master Memory 1 ECC or parity error",
"MPDMA TVF SDP Master Memory 2 ECC or parity error",
"MPDMA TVF SDP Master Memory 3 ECC or parity error",
"MPDMA TVF SDP Master Memory 4 ECC or parity error",
"MPDMA TVF SDP Master Memory 5 ECC or parity error",
"MPDMA TVF SDP Master Memory 6 ECC or parity error",
"MPDMA PTE Command FIFO ECC or parity error",
"MPDMA PTE Hub Data FIFO ECC or parity error",
"MPDMA PTE Internal Data FIFO ECC or parity error",
"MPDMA PTE Command Memory DMA ECC or parity error",
"MPDMA PTE Command Memory Internal ECC or parity error",
"MPDMA PTE DMA Completion FIFO ECC or parity error",
"MPDMA PTE Tablewalk Completion FIFO ECC or parity error",
"MPDMA PTE Descriptor Completion FIFO ECC or parity error",
"MPDMA PTE ReadOnly Completion FIFO ECC or parity error",
"MPDMA PTE DirectWrite Completion FIFO ECC or parity error",
"SDP Watchdog Timer expired",
};
static const char * const smca_nbio_mce_desc[] = {
"ECC or Parity error",
"PCIE error",
......@@ -400,6 +472,111 @@ static const char * const smca_pcie_mce_desc[] = {
"CCIX Non-okay write response with data error",
};
static const char * const smca_pcie2_mce_desc[] = {
"SDP Parity Error logging",
};
static const char * const smca_xgmipcs_mce_desc[] = {
"Data Loss Error",
"Training Error",
"Flow Control Acknowledge Error",
"Rx Fifo Underflow Error",
"Rx Fifo Overflow Error",
"CRC Error",
"BER Exceeded Error",
"Tx Vcid Data Error",
"Replay Buffer Parity Error",
"Data Parity Error",
"Replay Fifo Overflow Error",
"Replay FIfo Underflow Error",
"Elastic Fifo Overflow Error",
"Deskew Error",
"Flow Control CRC Error",
"Data Startup Limit Error",
"FC Init Timeout Error",
"Recovery Timeout Error",
"Ready Serial Timeout Error",
"Ready Serial Attempt Error",
"Recovery Attempt Error",
"Recovery Relock Attempt Error",
"Replay Attempt Error",
"Sync Header Error",
"Tx Replay Timeout Error",
"Rx Replay Timeout Error",
"LinkSub Tx Timeout Error",
"LinkSub Rx Timeout Error",
"Rx CMD Packet Error",
};
static const char * const smca_xgmiphy_mce_desc[] = {
"RAM ECC Error",
"ARC instruction buffer parity error",
"ARC data buffer parity error",
"PHY APB error",
};
static const char * const smca_nbif_mce_desc[] = {
"Timeout error from GMI",
"SRAM ECC error",
"NTB Error Event",
"SDP Parity error",
};
static const char * const smca_sata_mce_desc[] = {
"Parity error for port 0",
"Parity error for port 1",
"Parity error for port 2",
"Parity error for port 3",
"Parity error for port 4",
"Parity error for port 5",
"Parity error for port 6",
"Parity error for port 7",
};
static const char * const smca_usb_mce_desc[] = {
"Parity error or ECC error for S0 RAM0",
"Parity error or ECC error for S0 RAM1",
"Parity error or ECC error for S0 RAM2",
"Parity error for PHY RAM0",
"Parity error for PHY RAM1",
"AXI Slave Response error",
};
static const char * const smca_gmipcs_mce_desc[] = {
"Data Loss Error",
"Training Error",
"Replay Parity Error",
"Rx Fifo Underflow Error",
"Rx Fifo Overflow Error",
"CRC Error",
"BER Exceeded Error",
"Tx Fifo Underflow Error",
"Replay Buffer Parity Error",
"Tx Overflow Error",
"Replay Fifo Overflow Error",
"Replay Fifo Underflow Error",
"Elastic Fifo Overflow Error",
"Deskew Error",
"Offline Error",
"Data Startup Limit Error",
"FC Init Timeout Error",
"Recovery Timeout Error",
"Ready Serial Timeout Error",
"Ready Serial Attempt Error",
"Recovery Attempt Error",
"Recovery Relock Attempt Error",
"Deskew Abort Error",
"Rx Buffer Error",
"Rx LFDS Fifo Overflow Error",
"Rx LFDS Fifo Underflow Error",
"LinkSub Tx Timeout Error",
"LinkSub Rx Timeout Error",
"Rx CMD Packet Error",
"LFDS Training Timeout Error",
"LFDS FC Init Timeout Error",
"Data Loss Error",
};
struct smca_mce_desc {
const char * const *descs;
unsigned int num_descs;
......@@ -418,14 +595,28 @@ static struct smca_mce_desc smca_mce_descs[] = {
[SMCA_CS_V2] = { smca_cs2_mce_desc, ARRAY_SIZE(smca_cs2_mce_desc) },
[SMCA_PIE] = { smca_pie_mce_desc, ARRAY_SIZE(smca_pie_mce_desc) },
[SMCA_UMC] = { smca_umc_mce_desc, ARRAY_SIZE(smca_umc_mce_desc) },
[SMCA_UMC_V2] = { smca_umc2_mce_desc, ARRAY_SIZE(smca_umc2_mce_desc) },
[SMCA_PB] = { smca_pb_mce_desc, ARRAY_SIZE(smca_pb_mce_desc) },
[SMCA_PSP] = { smca_psp_mce_desc, ARRAY_SIZE(smca_psp_mce_desc) },
[SMCA_PSP_V2] = { smca_psp2_mce_desc, ARRAY_SIZE(smca_psp2_mce_desc) },
[SMCA_SMU] = { smca_smu_mce_desc, ARRAY_SIZE(smca_smu_mce_desc) },
[SMCA_SMU_V2] = { smca_smu2_mce_desc, ARRAY_SIZE(smca_smu2_mce_desc) },
[SMCA_MP5] = { smca_mp5_mce_desc, ARRAY_SIZE(smca_mp5_mce_desc) },
[SMCA_MPDMA] = { smca_mpdma_mce_desc, ARRAY_SIZE(smca_mpdma_mce_desc) },
[SMCA_NBIO] = { smca_nbio_mce_desc, ARRAY_SIZE(smca_nbio_mce_desc) },
[SMCA_PCIE] = { smca_pcie_mce_desc, ARRAY_SIZE(smca_pcie_mce_desc) },
[SMCA_PCIE_V2] = { smca_pcie2_mce_desc, ARRAY_SIZE(smca_pcie2_mce_desc) },
[SMCA_XGMI_PCS] = { smca_xgmipcs_mce_desc, ARRAY_SIZE(smca_xgmipcs_mce_desc) },
/* NBIF and SHUB have the same error descriptions, for now. */
[SMCA_NBIF] = { smca_nbif_mce_desc, ARRAY_SIZE(smca_nbif_mce_desc) },
[SMCA_SHUB] = { smca_nbif_mce_desc, ARRAY_SIZE(smca_nbif_mce_desc) },
[SMCA_SATA] = { smca_sata_mce_desc, ARRAY_SIZE(smca_sata_mce_desc) },
[SMCA_USB] = { smca_usb_mce_desc, ARRAY_SIZE(smca_usb_mce_desc) },
[SMCA_GMI_PCS] = { smca_gmipcs_mce_desc, ARRAY_SIZE(smca_gmipcs_mce_desc) },
/* All the PHY bank types have the same error descriptions, for now. */
[SMCA_XGMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
[SMCA_WAFL_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
[SMCA_GMI_PHY] = { smca_xgmiphy_mce_desc, ARRAY_SIZE(smca_xgmiphy_mce_desc) },
};
static bool f12h_mc0_mce(u16 ec, u8 xec)
......@@ -975,20 +1166,13 @@ static void decode_mc6_mce(struct mce *m)
/* Decode errors according to Scalable MCA specification */
static void decode_smca_error(struct mce *m)
{
struct smca_hwid *hwid;
enum smca_bank_types bank_type;
enum smca_bank_types bank_type = smca_get_bank_type(m->extcpu, m->bank);
const char *ip_name;
u8 xec = XEC(m->status, xec_mask);
if (m->bank >= ARRAY_SIZE(smca_banks))
if (bank_type >= N_SMCA_BANK_TYPES)
return;
hwid = smca_banks[m->bank].hwid;
if (!hwid)
return;
bank_type = hwid->bank_type;
if (bank_type == SMCA_RESERVED) {
pr_emerg(HW_ERR "Bank %d is reserved.\n", m->bank);
return;
......
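decode_smca_error() turns the per-CPU bank type plus the extended error code (XEC) into one of the strings above by indexing smca_mce_descs[]. The same lookup in outline, with stand-in names:

/* Sketch: bank type + XEC -> error description (stand-in names). */
#include <stdio.h>

enum bank_type { UMC_V2, N_TYPES };	/* stand-in for the SMCA enum */

static const char * const umc2_descs[] = {
	"DRAM ECC error", "Data poison error", "SDP parity error",
};

static const struct {
	const char * const *descs;
	unsigned int num_descs;
} mce_descs[N_TYPES] = {
	[UMC_V2] = { umc2_descs, 3 },
};

int main(void)
{
	unsigned int xec = 1;	/* hypothetical XEC from MCA_STATUS[20:16] */

	if (xec < mce_descs[UMC_V2].num_descs)
		printf("Ext. Error Code: %u, %s.\n",
		       xec, mce_descs[UMC_V2].descs[xec]);
	return 0;
}
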
......@@ -65,36 +65,17 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET 0xd8200c64
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4
/* Common for Zen CPU families (Family 17h and 18h) */
#define ZEN_REPORTED_TEMP_CTRL_OFFSET 0x00059800
/* Common for Zen CPU families (Family 17h and 18h and 19h) */
#define ZEN_REPORTED_TEMP_CTRL_BASE 0x00059800
#define ZEN_CCD_TEMP(x) (0x00059954 + ((x) * 4))
#define ZEN_CCD_TEMP(offset, x) (ZEN_REPORTED_TEMP_CTRL_BASE + \
(offset) + ((x) * 4))
#define ZEN_CCD_TEMP_VALID BIT(11)
#define ZEN_CCD_TEMP_MASK GENMASK(10, 0)
#define ZEN_CUR_TEMP_SHIFT 21
#define ZEN_CUR_TEMP_RANGE_SEL_MASK BIT(19)
#define ZEN_SVI_BASE 0x0005A000
/* F17h thermal registers through SMN */
#define F17H_M01H_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0xc)
#define F17H_M01H_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10)
#define F17H_M31H_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0x14)
#define F17H_M31H_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10)
#define F17H_M01H_CFACTOR_ICORE 1000000 /* 1A / LSB */
#define F17H_M01H_CFACTOR_ISOC 250000 /* 0.25A / LSB */
#define F17H_M31H_CFACTOR_ICORE 1000000 /* 1A / LSB */
#define F17H_M31H_CFACTOR_ISOC 310000 /* 0.31A / LSB */
/* F19h thermal registers through SMN */
#define F19H_M01_SVI_TEL_PLANE0 (ZEN_SVI_BASE + 0x14)
#define F19H_M01_SVI_TEL_PLANE1 (ZEN_SVI_BASE + 0x10)
#define F19H_M01H_CFACTOR_ICORE 1000000 /* 1A / LSB */
#define F19H_M01H_CFACTOR_ISOC 310000 /* 0.31A / LSB */
struct k10temp_data {
struct pci_dev *pdev;
void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
......@@ -103,6 +84,7 @@ struct k10temp_data {
u32 temp_adjust_mask;
u32 show_temp;
bool is_zen;
u32 ccd_offset;
};
#define TCTL_BIT 0
......@@ -163,7 +145,7 @@ static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
static void read_tempreg_nb_zen(struct pci_dev *pdev, u32 *regval)
{
amd_smn_read(amd_pci_dev_to_node_id(pdev),
ZEN_REPORTED_TEMP_CTRL_OFFSET, regval);
ZEN_REPORTED_TEMP_CTRL_BASE, regval);
}
static long get_raw_temp(struct k10temp_data *data)
......@@ -189,6 +171,10 @@ static const char *k10temp_temp_label[] = {
"Tccd6",
"Tccd7",
"Tccd8",
"Tccd9",
"Tccd10",
"Tccd11",
"Tccd12",
};
static int k10temp_read_labels(struct device *dev,
......@@ -224,9 +210,10 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel,
if (*val < 0)
*val = 0;
break;
case 2 ... 9: /* Tccd{1-8} */
case 2 ... 13: /* Tccd{1-12} */
amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
ZEN_CCD_TEMP(channel - 2), &regval);
ZEN_CCD_TEMP(data->ccd_offset, channel - 2),
&regval);
*val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000;
break;
default:
......@@ -358,13 +345,11 @@ static const struct hwmon_channel_info *k10temp_info[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
HWMON_CHANNEL_INFO(in,
HWMON_I_INPUT | HWMON_I_LABEL,
HWMON_I_INPUT | HWMON_I_LABEL),
HWMON_CHANNEL_INFO(curr,
HWMON_C_INPUT | HWMON_C_LABEL,
HWMON_C_INPUT | HWMON_C_LABEL),
NULL
};
......@@ -387,7 +372,7 @@ static void k10temp_get_ccd_support(struct pci_dev *pdev,
for (i = 0; i < limit; i++) {
amd_smn_read(amd_pci_dev_to_node_id(pdev),
ZEN_CCD_TEMP(i), &regval);
ZEN_CCD_TEMP(data->ccd_offset, i), &regval);
if (regval & ZEN_CCD_TEMP_VALID)
data->show_temp |= BIT(TCCD_BIT(i));
}
......@@ -426,7 +411,6 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
} else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
data->read_tempreg = read_tempreg_nb_zen;
data->show_temp |= BIT(TDIE_BIT); /* show Tdie */
data->is_zen = true;
switch (boot_cpu_data.x86_model) {
......@@ -434,23 +418,38 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case 0x8: /* Zen+ */
case 0x11: /* Zen APU */
case 0x18: /* Zen+ APU */
data->ccd_offset = 0x154;
k10temp_get_ccd_support(pdev, data, 4);
break;
case 0x31: /* Zen2 Threadripper */
case 0x60: /* Renoir */
case 0x68: /* Lucienne */
case 0x71: /* Zen2 */
data->ccd_offset = 0x154;
k10temp_get_ccd_support(pdev, data, 8);
break;
}
} else if (boot_cpu_data.x86 == 0x19) {
data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK;
data->read_tempreg = read_tempreg_nb_zen;
data->show_temp |= BIT(TDIE_BIT);
data->is_zen = true;
switch (boot_cpu_data.x86_model) {
case 0x0 ... 0x1: /* Zen3 */
case 0x0 ... 0x1: /* Zen3 SP3/TR */
case 0x21: /* Zen3 Ryzen Desktop */
case 0x50 ... 0x5f: /* Green Sardine */
data->ccd_offset = 0x154;
k10temp_get_ccd_support(pdev, data, 8);
break;
case 0x40 ... 0x4f: /* Yellow Carp */
data->ccd_offset = 0x300;
k10temp_get_ccd_support(pdev, data, 8);
break;
case 0x10 ... 0x1f:
case 0xa0 ... 0xaf:
data->ccd_offset = 0x300;
k10temp_get_ccd_support(pdev, data, 12);
break;
}
} else {
data->read_htcreg = read_htcreg_pci;
......@@ -462,6 +461,7 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (boot_cpu_data.x86 == entry->model &&
strstr(boot_cpu_data.x86_model_id, entry->id)) {
data->show_temp |= BIT(TDIE_BIT); /* show Tdie */
data->temp_offset = entry->offset;
break;
}
......@@ -490,6 +490,9 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
......
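With the new ccd_offset, Genoa's CCD sensors sit at ZEN_CCD_TEMP(0x300, x) and are converted with the same fixed-point formula the driver already used: 0.125 degrees C per LSB with a -49 degree offset, reported in millidegrees. A worked example with a made-up register value:

/* Sketch: k10temp CCD temperature conversion (made-up register value). */
#include <stdint.h>
#include <stdio.h>

#define ZEN_REPORTED_TEMP_CTRL_BASE	0x00059800
#define ZEN_CCD_TEMP(offset, x)		(ZEN_REPORTED_TEMP_CTRL_BASE + \
					 (offset) + ((x) * 4))
#define ZEN_CCD_TEMP_VALID		(1u << 11)
#define ZEN_CCD_TEMP_MASK		((1u << 11) - 1)	/* GENMASK(10, 0) */

int main(void)
{
	uint32_t regval = ZEN_CCD_TEMP_VALID | 800;	/* hypothetical raw value */

	/* Genoa: ccd_offset = 0x300, so CCD 2 (label Tccd3) sits here. */
	printf("SMN reg: %#x\n", ZEN_CCD_TEMP(0x300, 2));

	if (regval & ZEN_CCD_TEMP_VALID)	/* 800 * 125 - 49000 = 51000 */
		printf("Tccd3: %ld millidegrees C\n",
		       (long)(regval & ZEN_CCD_TEMP_MASK) * 125 - 49000);
	return 0;
}
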
......@@ -180,6 +180,8 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* This is a variant of the DDR4 memories.
* @MEM_LRDDR4: Load-Reduced DDR4 memory.
* @MEM_DDR5: Unbuffered DDR5 RAM
* @MEM_RDDR5: Registered DDR5 RAM
* @MEM_LRDDR5: Load-Reduced DDR5 memory.
* @MEM_NVDIMM: Non-volatile RAM
* @MEM_HBM2: High bandwidth Memory Gen 2.
*/
......@@ -206,6 +208,8 @@ enum mem_type {
MEM_RDDR4,
MEM_LRDDR4,
MEM_DDR5,
MEM_RDDR5,
MEM_LRDDR5,
MEM_NVDIMM,
MEM_HBM2,
};
......@@ -231,6 +235,8 @@ enum mem_type {
#define MEM_FLAG_RDDR4 BIT(MEM_RDDR4)
#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
#define MEM_FLAG_DDR5 BIT(MEM_DDR5)
#define MEM_FLAG_RDDR5 BIT(MEM_RDDR5)
#define MEM_FLAG_LRDDR5 BIT(MEM_LRDDR5)
#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
#define MEM_FLAG_HBM2 BIT(MEM_HBM2)
......
......@@ -554,6 +554,9 @@
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F3 0x14b0
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F3 0x167c
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
......