Commit d9de5ce8 authored by Linus Torvalds

Merge tag 'edac_updates_for_v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras

Pull EDAC updates from Borislav Petkov:

 - Add a driver for the RAS functionality on Xilinx's on-chip memory
   controller

 - Add support for decoding errors from the first and second level
   memory on SKL-based hardware

 - Add support for the memory controllers in Intel Granite Rapids and
   Emerald Rapids machines

 - First round of amd64_edac driver simplification and removal of
   unneeded functionality

 - The usual cleanups and fixes

* tag 'edac_updates_for_v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras:
  EDAC/amd64: Shut up an -Werror,-Wsometimes-uninitialized clang false positive
  EDAC/amd64: Remove early_channel_count()
  EDAC/amd64: Remove PCI Function 0
  EDAC/amd64: Remove PCI Function 6
  EDAC/amd64: Remove scrub rate control for Family 17h and later
  EDAC/amd64: Don't set up EDAC PCI control on Family 17h+
  EDAC/i10nm: Add driver decoder for Sapphire Rapids server
  EDAC/i10nm: Add Intel Granite Rapids server support
  EDAC/i10nm: Make more configurations CPU model specific
  EDAC/i10nm: Add Intel Emerald Rapids server support
  EDAC/skx_common: Delete duplicated and unreachable code
  EDAC/skx_common: Enable EDAC support for the "near" memory
  EDAC/qcom: Add platform_device_id table for module autoloading
  EDAC/zynqmp: Add EDAC support for Xilinx ZynqMP OCM
  dt-bindings: edac: Add bindings for Xilinx ZynqMP OCM
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/memory-controllers/xlnx,zynqmp-ocmc-1.0.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Xilinx ZynqMP OCM (On-Chip Memory) Controller

maintainers:
  - Shubhrajyoti Datta <shubhrajyoti.datta@amd.com>
  - Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>

description: |
  The OCM supports 64-bit wide ECC functionality to detect multi-bit errors
  and recover from a single-bit memory fault. On a write, if all bytes are
  being written, the ECC is generated and written into the ECC RAM along with
  the write-data that is written into the data RAM. If one or more bytes are
  not written, then the read operation results in a correctable error or
  uncorrectable error.

properties:
  compatible:
    const: xlnx,zynqmp-ocmc-1.0

  reg:
    maxItems: 1

  interrupts:
    maxItems: 1

required:
  - compatible
  - reg
  - interrupts

additionalProperties: false

examples:
  - |
    #include <dt-bindings/interrupt-controller/arm-gic.h>

    memory-controller@ff960000 {
        compatible = "xlnx,zynqmp-ocmc-1.0";
        reg = <0xff960000 0x1000>;
        interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
    };
@@ -22743,6 +22743,13 @@ F: Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml
F: drivers/dma/xilinx/xilinx_dpdma.c
F: include/dt-bindings/dma/xlnx-zynqmp-dpdma.h
XILINX ZYNQMP OCM EDAC DRIVER
M: Shubhrajyoti Datta <shubhrajyoti.datta@amd.com>
M: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
S: Maintained
F: Documentation/devicetree/bindings/memory-controllers/xlnx,zynqmp-ocmc-1.0.yaml
F: drivers/edac/zynqmp_edac.c
XILINX ZYNQMP PSGTR PHY DRIVER
M: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
@@ -542,4 +542,12 @@ config EDAC_DMC520
	  Support for error detection and correction on the
	  SoCs with ARM DMC-520 DRAM controller.

config EDAC_ZYNQMP
	tristate "Xilinx ZynqMP OCM Controller"
	depends on ARCH_ZYNQMP || COMPILE_TEST
	help
	  This driver supports error detection and correction for the
	  Xilinx ZynqMP OCM (On Chip Memory) controller. It can also be
	  built as a module. In that case it will be called zynqmp_edac.

endif # EDAC
@@ -84,3 +84,4 @@ obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o
obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o
obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o
obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o
obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o
@@ -182,21 +182,6 @@ static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
* other archs, we might not have access to the caches directly.
*/
static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
{
/*
* Fam17h supports scrub values between 0x5 and 0x14. Also, the values
* are shifted down by 0x5, so scrubval 0x5 is written to the register
* as 0x0, scrubval 0x6 as 0x1, etc.
*/
if (scrubval >= 0x5 && scrubval <= 0x14) {
scrubval -= 0x5;
pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
} else {
pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
}
}
/*
* Scan the scrub rate mapping table for a close or matching bandwidth value to
* issue. If requested is too big, then use last maximum value found.
@@ -229,9 +214,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
if (pvt->umc) {
__f17h_set_scrubval(pvt, scrubval);
} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
if (pvt->fam == 0x15 && pvt->model == 0x60) {
f15h_select_dct(pvt, 0);
pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
f15h_select_dct(pvt, 1);
@@ -271,16 +254,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
int i, retval = -EINVAL;
u32 scrubval = 0;
if (pvt->umc) {
amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
if (scrubval & BIT(0)) {
amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
scrubval &= 0xF;
scrubval += 0x5;
} else {
scrubval = 0;
}
} else if (pvt->fam == 0x15) {
if (pvt->fam == 0x15) {
/* Erratum #505 */
if (pvt->model < 0x10)
f15h_select_dct(pvt, 0);
@@ -1454,9 +1428,6 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt)
debug_display_dimm_sizes_df(pvt, i);
}
edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
pvt->dhar, dhar_base(pvt));
}
/* Display and decode various NB registers for debug purposes. */
@@ -1491,6 +1462,8 @@ static void __dump_misc_regs(struct amd64_pvt *pvt)
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
}
/* Display and decode various NB registers for debug purposes. */
@@ -1501,8 +1474,6 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
else
__dump_misc_regs(pvt);
edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
@@ -1732,24 +1703,6 @@ static void determine_memory_type(struct amd64_pvt *pvt)
pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}
/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
int flag;
if (pvt->ext_model >= K8_REV_F)
/* RevF (NPT) and later */
flag = pvt->dclr0 & WIDTH_128;
else
/* RevE and earlier */
flag = pvt->dclr0 & REVE_WIDTH_128;
/* not used */
pvt->dclr1 = 0;
return (flag) ? 2 : 1;
}
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
@@ -2001,69 +1954,6 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
}
}
/*
* Get the number of DCT channels in use.
*
* Return:
* number of Memory Channels in operation
* Pass back:
* contents of the DCL0_LOW register
*/
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
int i, j, channels = 0;
/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
return 2;
/*
* Need to check if in unganged mode: In such, there are 2 channels,
* but they are not in 128 bit mode and thus the above 'dclr0' status
* bit will be OFF.
*
* Need to check DCT0[0] and DCT1[0] to see if only one of them has
* their CSEnable bit on. If so, then SINGLE DIMM case.
*/
edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
/*
* Check DRAM Bank Address Mapping values for each DIMM to see if there
* is more than just one DIMM present in unganged mode. Need to check
* both controllers since DIMMs can be placed in either one.
*/
for (i = 0; i < 2; i++) {
u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
for (j = 0; j < 4; j++) {
if (DBAM_DIMM(j, dbam) > 0) {
channels++;
break;
}
}
}
if (channels > 2)
channels = 2;
amd64_info("MCT channel count: %d\n", channels);
return channels;
}
static int f17_early_channel_count(struct amd64_pvt *pvt)
{
int i, channels = 0;
/* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
for_each_umc(i)
channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
amd64_info("MCT channel count: %d\n", channels);
return channels;
}
static int ddr3_cs_size(unsigned i, bool dct_width)
{
unsigned shift = 0;
@@ -2858,7 +2748,6 @@ static struct amd64_family_type family_types[] = {
.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
.max_mcs = 2,
.ops = {
.early_channel_count = k8_early_channel_count,
.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
.dbam_to_cs = k8_dbam_to_chip_select,
}
@@ -2869,7 +2758,6 @@ static struct amd64_family_type family_types[] = {
.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
.max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f10_dbam_to_chip_select,
}
@@ -2880,7 +2768,6 @@ static struct amd64_family_type family_types[] = {
.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
.max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f15_dbam_to_chip_select,
}
@@ -2891,7 +2778,6 @@ static struct amd64_family_type family_types[] = {
.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
.max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f16_dbam_to_chip_select,
}
@@ -2902,7 +2788,6 @@ static struct amd64_family_type family_types[] = {
.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
.max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f15_m60h_dbam_to_chip_select,
}
@@ -2913,7 +2798,6 @@ static struct amd64_family_type family_types[] = {
.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
.max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f16_dbam_to_chip_select,
}
@@ -2924,89 +2808,64 @@ static struct amd64_family_type family_types[] = {
.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
.max_mcs = 2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f16_dbam_to_chip_select,
}
},
[F17_CPUS] = {
.ctl_name = "F17h",
.f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
.max_mcs = 2,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M10H_CPUS] = {
.ctl_name = "F17h_M10h",
.f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
.max_mcs = 2,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M30H_CPUS] = {
.ctl_name = "F17h_M30h",
.f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
.max_mcs = 8,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M60H_CPUS] = {
.ctl_name = "F17h_M60h",
.f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
.max_mcs = 2,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M70H_CPUS] = {
.ctl_name = "F17h_M70h",
.f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
.max_mcs = 2,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F19_CPUS] = {
.ctl_name = "F19h",
.f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
.max_mcs = 8,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F19_M10H_CPUS] = {
.ctl_name = "F19h_M10h",
.f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6,
.max_mcs = 12,
.flags.zn_regs_v2 = 1,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F19_M50H_CPUS] = {
.ctl_name = "F19h_M50h",
.f0_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F6,
.max_mcs = 2,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
@@ -3316,36 +3175,12 @@ static void decode_umc_error(int node_id, struct mce *m)
/*
* Use pvt->F3 which contains the F3 CPU PCI device to get the related
* F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
* Reserve F0 and F6 on systems with a UMC.
*/
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
if (pvt->umc) {
pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
if (!pvt->F0) {
edac_dbg(1, "F0 not found, device 0x%x\n", pci_id1);
return -ENODEV;
}
pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
if (!pvt->F6) {
pci_dev_put(pvt->F0);
pvt->F0 = NULL;
edac_dbg(1, "F6 not found: device 0x%x\n", pci_id2);
return -ENODEV;
}
if (!pci_ctl_dev)
pci_ctl_dev = &pvt->F0->dev;
edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
if (pvt->umc)
return 0;
}
/* Reserve the ADDRESS MAP Device */
pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
@@ -3377,8 +3212,7 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
if (pvt->umc) {
pci_dev_put(pvt->F0);
pci_dev_put(pvt->F6);
return;
} else {
pci_dev_put(pvt->F1);
pci_dev_put(pvt->F2);
@@ -3468,7 +3302,6 @@ static void read_mc_regs(struct amd64_pvt *pvt)
if (pvt->umc) {
__read_mc_regs_df(pvt);
amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
goto skip;
}
@@ -3691,7 +3524,7 @@ static int init_csrows(struct mem_ctl_info *mci)
: EDAC_SECDED;
}
for (j = 0; j < pvt->channel_count; j++) {
for (j = 0; j < fam_type->max_mcs; j++) {
dimm = csrow->channels[j]->dimm;
dimm->mtype = pvt->dram_type;
dimm->edac_mode = edac_mode;
@@ -3967,6 +3800,9 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;
if (pvt->fam >= 0x17)
return;
/* memory scrubber interface */
mci->set_sdram_scrub_rate = set_scrub_rate;
mci->get_sdram_scrub_rate = get_scrub_rate;
@@ -4092,16 +3928,13 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
static int hw_info_get(struct amd64_pvt *pvt)
{
u16 pci_id1, pci_id2;
u16 pci_id1 = 0, pci_id2 = 0;
int ret;
if (pvt->fam >= 0x17) {
pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
if (!pvt->umc)
return -ENOMEM;
pci_id1 = fam_type->f0_id;
pci_id2 = fam_type->f6_id;
} else {
pci_id1 = fam_type->f1_id;
pci_id2 = fam_type->f2_id;
@@ -4118,7 +3951,7 @@ static int hw_info_get(struct amd64_pvt *pvt)
static void hw_info_put(struct amd64_pvt *pvt)
{
if (pvt->F0 || pvt->F1)
if (pvt->F1)
free_mc_sibling_devs(pvt);
kfree(pvt->umc);
@@ -4128,28 +3961,12 @@ static int init_one_instance(struct amd64_pvt *pvt)
{
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
int ret = -EINVAL;
/*
* We need to determine how many memory channels there are. Then use
* that information for calculating the size of the dynamic instance
* tables in the 'mci' structure.
*/
pvt->channel_count = pvt->ops->early_channel_count(pvt);
if (pvt->channel_count < 0)
return ret;
int ret = -ENOMEM;
ret = -ENOMEM;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = pvt->csels[0].b_cnt;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
/*
* Always allocate two channels since we can have setups with DIMMs on
* only one channel. Also, this simplifies handling later for the price
* of a couple of KBs tops.
*/
layers[1].size = fam_type->max_mcs;
layers[1].is_virt_csrow = false;
@@ -4370,12 +4187,12 @@ static int __init amd64_edac_init(void)
}
/* register stuff with EDAC MCE */
if (boot_cpu_data.x86 >= 0x17)
if (boot_cpu_data.x86 >= 0x17) {
amd_register_ecc_decoder(decode_umc_error);
else
} else {
amd_register_ecc_decoder(decode_bus_error);
setup_pci_device();
setup_pci_device();
}
#ifdef CONFIG_X86_32
amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
@@ -114,22 +114,6 @@
#define PCI_DEVICE_ID_AMD_16H_NB_F2 0x1532
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
#define PCI_DEVICE_ID_AMD_17H_DF_F0 0x1460
#define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F0 0x1448
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F6 0x144e
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650
#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F0 0x14ad
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F6 0x14b3
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F0 0x166a
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F6 0x1670
/*
* Function 1 - Address Map
@@ -215,8 +199,6 @@
#define DCT_SEL_HI 0x114
#define F15H_M60H_SCRCTRL 0x1C8
#define F17H_SCR_BASE_ADDR 0x48
#define F17H_SCR_LIMIT_ADDR 0x4C
/*
* Function 3 - Misc Control
@@ -356,7 +338,7 @@ struct amd64_pvt {
struct low_ops *ops;
/* pci_device handles which we utilize */
struct pci_dev *F0, *F1, *F2, *F3, *F6;
struct pci_dev *F1, *F2, *F3;
u16 mc_node_id; /* MC index of this MC node */
u8 fam; /* CPU family */
@@ -364,7 +346,6 @@ struct amd64_pvt {
u8 stepping; /* ... stepping */
int ext_model; /* extended model value of this node */
int channel_count;
/* Raw registers */
u32 dclr0; /* DRAM Configuration Low DCT0 reg */
@@ -484,7 +465,6 @@ struct ecc_settings {
* functions and per device encoding/decoding logic.
*/
struct low_ops {
int (*early_channel_count) (struct amd64_pvt *pvt);
void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
struct err_info *);
int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct,
@@ -503,7 +483,7 @@ struct amd64_family_flags {
struct amd64_family_type {
const char *ctl_name;
u16 f0_id, f1_id, f2_id, f6_id;
u16 f1_id, f2_id;
/* Maximum number of memory controllers per die/node. */
u8 max_mcs;
struct amd64_family_flags flags;
This diff is collapsed.
@@ -396,12 +396,19 @@ static int qcom_llcc_edac_remove(struct platform_device *pdev)
return 0;
}
static const struct platform_device_id qcom_llcc_edac_id_table[] = {
{ .name = "qcom_llcc_edac" },
{}
};
MODULE_DEVICE_TABLE(platform, qcom_llcc_edac_id_table);
static struct platform_driver qcom_llcc_edac_driver = {
.probe = qcom_llcc_edac_probe,
.remove = qcom_llcc_edac_remove,
.driver = {
.name = "qcom_llcc_edac",
},
.id_table = qcom_llcc_edac_id_table,
};
module_platform_driver(qcom_llcc_edac_driver);
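
For context: registering the platform_device_id table via MODULE_DEVICE_TABLE(platform, ...) is what generates the module alias userspace uses for autoloading. It is roughly equivalent to declaring the alias by hand, as in this sketch:

/* Equivalent effect (sketch, not part of the commit): udev/modprobe match
 * this alias when the parent LLCC driver instantiates the "qcom_llcc_edac"
 * platform device. */
MODULE_ALIAS("platform:qcom_llcc_edac");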
@@ -560,44 +560,28 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
tp_event = HW_EVENT_ERR_CORRECTED;
}
/*
* According to Intel Architecture spec vol 3B,
* Table 15-10 "IA32_MCi_Status [15:0] Compound Error Code Encoding"
* memory errors should fit one of these masks:
* 000f 0000 1mmm cccc (binary)
* 000f 0010 1mmm cccc (binary) [RAM used as cache]
* where:
* f = Correction Report Filtering Bit. If 1, subsequent errors
* won't be shown
* mmm = error type
* cccc = channel
* If the mask doesn't match, report an error to the parsing logic
*/
if (!((errcode & 0xef80) == 0x80 || (errcode & 0xef80) == 0x280)) {
optype = "Can't parse: it is not a mem";
} else {
switch (optypenum) {
case 0:
optype = "generic undef request error";
break;
case 1:
optype = "memory read error";
break;
case 2:
optype = "memory write error";
break;
case 3:
optype = "addr/cmd error";
break;
case 4:
optype = "memory scrubbing error";
scrub_err = true;
break;
default:
optype = "reserved";
break;
}
switch (optypenum) {
case 0:
optype = "generic undef request error";
break;
case 1:
optype = "memory read error";
break;
case 2:
optype = "memory write error";
break;
case 3:
optype = "addr/cmd error";
break;
case 4:
optype = "memory scrubbing error";
scrub_err = true;
break;
default:
optype = "reserved";
break;
}
if (res->decoded_by_adxl) {
len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
overflow ? " OVERFLOW" : "",
@@ -632,12 +616,18 @@ static bool skx_error_in_1st_level_mem(const struct mce *m)
if (!skx_mem_cfg_2lm)
return false;
errcode = GET_BITFIELD(m->status, 0, 15);
errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
if ((errcode & 0xef80) != 0x280)
return false;
return true;
return errcode == MCACOD_EXT_MEM_ERR;
}
static bool skx_error_in_mem(const struct mce *m)
{
u32 errcode;
errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
return (errcode == MCACOD_MEM_CTL_ERR || errcode == MCACOD_EXT_MEM_ERR);
}
int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
@@ -651,8 +641,8 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
if (mce->kflags & MCE_HANDLED_CEC)
return NOTIFY_DONE;
/* ignore unless this is memory related with an address */
if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
/* Ignore unless this is memory related with an address */
if (!skx_error_in_mem(mce) || !(mce->status & MCI_STATUS_ADDRV))
return NOTIFY_DONE;
memset(&res, 0, sizeof(res));
@@ -33,7 +33,7 @@
#define SKX_NUM_CHANNELS 3 /* Channels per memory controller */
#define SKX_NUM_DIMMS 2 /* Max DIMMS per channel */
#define I10NM_NUM_DDR_IMC 4
#define I10NM_NUM_DDR_IMC 12
#define I10NM_NUM_DDR_CHANNELS 2
#define I10NM_NUM_DDR_DIMMS 2
@@ -56,6 +56,30 @@
#define MCI_MISC_ECC_MODE(m) (((m) >> 59) & 15)
#define MCI_MISC_ECC_DDRT 8 /* read from DDRT */
/*
* According to Intel Architecture spec vol 3B,
* Table 15-10 "IA32_MCi_Status [15:0] Compound Error Code Encoding"
* memory errors should fit one of these masks:
* 000f 0000 1mmm cccc (binary)
* 000f 0010 1mmm cccc (binary) [RAM used as cache]
* where:
* f = Correction Report Filtering Bit. If 1, subsequent errors
* won't be shown
* mmm = error type
* cccc = channel
*/
#define MCACOD_MEM_ERR_MASK 0xef80
/*
* Errors from either the memory of the 1-level memory system or the
* 2nd level memory (the slow "far" memory) of the 2-level memory system.
*/
#define MCACOD_MEM_CTL_ERR 0x80
/*
* Errors from the 1st level memory (the fast "near" memory as cache)
* of the 2-level memory system.
*/
#define MCACOD_EXT_MEM_ERR 0x280
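
To make the mask arithmetic concrete, here is a small stand-alone sketch (not part of the commit) that classifies a raw IA32_MCi_STATUS compound error code the same way skx_error_in_mem() and skx_error_in_1st_level_mem() do; the sample codes are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define MCACOD_MEM_ERR_MASK 0xef80
#define MCACOD_MEM_CTL_ERR  0x80	/* 000f 0000 1mmm cccc */
#define MCACOD_EXT_MEM_ERR  0x280	/* 000f 0010 1mmm cccc */

static const char *classify(uint16_t errcode)
{
	switch (errcode & MCACOD_MEM_ERR_MASK) {
	case MCACOD_MEM_CTL_ERR:
		return "memory error (1LM, or far memory of 2LM)";
	case MCACOD_EXT_MEM_ERR:
		return "near-memory error (1st level of 2LM)";
	default:
		return "not a memory error";
	}
}

int main(void)
{
	printf("0x%04x -> %s\n", 0x009f, classify(0x009f)); /* read error, channel 0xf */
	printf("0x%04x -> %s\n", 0x02a1, classify(0x02a1)); /* RAM-as-cache write error */
	return 0;
}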
/*
* Each cpu socket contains some pci devices that provide global
* information, and also some that are local to each of the two
@@ -105,7 +129,8 @@ struct skx_pvt {
enum type {
SKX,
I10NM,
SPR
SPR,
GNR
};
enum {
@@ -149,19 +174,47 @@ struct decoded_addr {
bool decoded_by_adxl;
};
struct pci_bdf {
u32 bus : 8;
u32 dev : 5;
u32 fun : 3;
};
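
The bitfield replaces the old packed sad_all_devfn value with named bus/device/function parts. A quick sketch (not from the commit) of how such a bdf maps onto the kernel's packed devfn encoding, using the standard PCI_DEVFN() layout:

#include <stdio.h>

/* Same packing as the kernel's PCI_DEVFN()/PCI_SLOT()/PCI_FUNC() macros. */
#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

int main(void)
{
	unsigned int devfn = PCI_DEVFN(0x1d, 6);	/* hypothetical device/function */

	printf("devfn=0x%02x -> dev=0x%02x fun=%u\n",
	       devfn, PCI_SLOT(devfn), PCI_FUNC(devfn));
	return 0;
}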
struct res_config {
enum type type;
/* Configuration agent device ID */
unsigned int decs_did;
/* Default bus number configuration register offset */
int busno_cfg_offset;
/* DDR memory controllers per socket */
int ddr_imc_num;
/* DDR channels per DDR memory controller */
int ddr_chan_num;
/* DDR DIMMs per DDR memory channel */
int ddr_dimm_num;
/* Per DDR channel memory-mapped I/O size */
int ddr_chan_mmio_sz;
/* HBM memory controllers per socket */
int hbm_imc_num;
/* HBM channels per HBM memory controller */
int hbm_chan_num;
/* HBM DIMMs per HBM memory channel */
int hbm_dimm_num;
/* Per HBM channel memory-mapped I/O size */
int hbm_chan_mmio_sz;
bool support_ddr5;
/* SAD device number and function number */
unsigned int sad_all_devfn;
/* SAD device BDF */
struct pci_bdf sad_all_bdf;
/* PCU device BDF */
struct pci_bdf pcu_cr3_bdf;
/* UTIL device BDF */
struct pci_bdf util_all_bdf;
/* URACU device BDF */
struct pci_bdf uracu_bdf;
/* DDR mdev device BDF */
struct pci_bdf ddr_mdev_bdf;
/* HBM mdev device BDF */
struct pci_bdf hbm_mdev_bdf;
int sad_all_offset;
/* Offsets of retry_rd_err_log registers */
u32 *offsets_scrub;
// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx ZynqMP OCM ECC Driver
*
* Copyright (C) 2022 Advanced Micro Devices, Inc.
*/
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include "edac_module.h"
#define ZYNQMP_OCM_EDAC_MSG_SIZE 256
#define ZYNQMP_OCM_EDAC_STRING "zynqmp_ocm"
/* Error/Interrupt registers */
#define ERR_CTRL_OFST 0x0
#define OCM_ISR_OFST 0x04
#define OCM_IMR_OFST 0x08
#define OCM_IEN_OFST 0x0C
#define OCM_IDS_OFST 0x10
/* ECC control register */
#define ECC_CTRL_OFST 0x14
/* Correctable error info registers */
#define CE_FFA_OFST 0x1C
#define CE_FFD0_OFST 0x20
#define CE_FFD1_OFST 0x24
#define CE_FFD2_OFST 0x28
#define CE_FFD3_OFST 0x2C
#define CE_FFE_OFST 0x30
/* Uncorrectable error info registers */
#define UE_FFA_OFST 0x34
#define UE_FFD0_OFST 0x38
#define UE_FFD1_OFST 0x3C
#define UE_FFD2_OFST 0x40
#define UE_FFD3_OFST 0x44
#define UE_FFE_OFST 0x48
/* ECC control register bit field definitions */
#define ECC_CTRL_CLR_CE_ERR 0x40
#define ECC_CTRL_CLR_UE_ERR 0x80
/* Fault injection data and count registers */
#define OCM_FID0_OFST 0x4C
#define OCM_FID1_OFST 0x50
#define OCM_FID2_OFST 0x54
#define OCM_FID3_OFST 0x58
#define OCM_FIC_OFST 0x74
#define UE_MAX_BITPOS_LOWER 31
#define UE_MIN_BITPOS_UPPER 32
#define UE_MAX_BITPOS_UPPER 63
/* Interrupt masks */
#define OCM_CEINTR_MASK BIT(6)
#define OCM_UEINTR_MASK BIT(7)
#define OCM_ECC_ENABLE_MASK BIT(0)
#define OCM_FICOUNT_MASK GENMASK(23, 0)
#define OCM_NUM_UE_BITPOS 2
#define OCM_BASEVAL 0xFFFC0000
#define EDAC_DEVICE "ZynqMP-OCM"
/**
* struct ecc_error_info - ECC error log information
* @addr: Fault generated at this address
* @fault_lo: Generated fault data (lower 32-bit)
* @fault_hi: Generated fault data (upper 32-bit)
*/
struct ecc_error_info {
u32 addr;
u32 fault_lo;
u32 fault_hi;
};
/**
* struct ecc_status - ECC status information to report
* @ce_cnt: Correctable error count
* @ue_cnt: Uncorrectable error count
* @ceinfo: Correctable error log information
* @ueinfo: Uncorrectable error log information
*/
struct ecc_status {
u32 ce_cnt;
u32 ue_cnt;
struct ecc_error_info ceinfo;
struct ecc_error_info ueinfo;
};
/**
* struct edac_priv - OCM private instance data
* @baseaddr: Base address of the OCM
* @message: Buffer for framing the event specific info
* @stat: ECC status information
* @ce_cnt: Correctable Error count
* @ue_cnt: Uncorrectable Error count
* @debugfs_dir: Directory entry for debugfs
* @ce_bitpos: Bit position for Correctable Error
* @ue_bitpos: Array to store Uncorrectable Error bit positions
* @fault_injection_cnt: Fault Injection Counter value
*/
struct edac_priv {
void __iomem *baseaddr;
char message[ZYNQMP_OCM_EDAC_MSG_SIZE];
struct ecc_status stat;
u32 ce_cnt;
u32 ue_cnt;
#ifdef CONFIG_EDAC_DEBUG
struct dentry *debugfs_dir;
u8 ce_bitpos;
u8 ue_bitpos[OCM_NUM_UE_BITPOS];
u32 fault_injection_cnt;
#endif
};
/**
* get_error_info - Get the current ECC error info
* @base: Pointer to the base address of the OCM
* @p: Pointer to the OCM ECC status structure
* @mask: Status register mask value
*
* Determines whether there is any ECC error and, if so, records its details
*
*/
static void get_error_info(void __iomem *base, struct ecc_status *p, int mask)
{
if (mask & OCM_CEINTR_MASK) {
p->ce_cnt++;
p->ceinfo.fault_lo = readl(base + CE_FFD0_OFST);
p->ceinfo.fault_hi = readl(base + CE_FFD1_OFST);
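/* The FFA register reports the failing address as an offset within the OCM;
 * OR in the fixed OCM base (0xFFFC0000 on ZynqMP) so the log shows an
 * absolute address. */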
p->ceinfo.addr = (OCM_BASEVAL | readl(base + CE_FFA_OFST));
writel(ECC_CTRL_CLR_CE_ERR, base + OCM_ISR_OFST);
} else if (mask & OCM_UEINTR_MASK) {
p->ue_cnt++;
p->ueinfo.fault_lo = readl(base + UE_FFD0_OFST);
p->ueinfo.fault_hi = readl(base + UE_FFD1_OFST);
p->ueinfo.addr = (OCM_BASEVAL | readl(base + UE_FFA_OFST));
writel(ECC_CTRL_CLR_UE_ERR, base + OCM_ISR_OFST);
}
}
/**
* handle_error - Handle error types CE and UE
* @dci: Pointer to the EDAC device instance
* @p: Pointer to the OCM ECC status structure
*
* Handles correctable and uncorrectable errors.
*/
static void handle_error(struct edac_device_ctl_info *dci, struct ecc_status *p)
{
struct edac_priv *priv = dci->pvt_info;
struct ecc_error_info *pinf;
if (p->ce_cnt) {
pinf = &p->ceinfo;
snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE,
"\nOCM ECC error type :%s\nAddr: [0x%x]\nFault Data[0x%08x%08x]",
"CE", pinf->addr, pinf->fault_hi, pinf->fault_lo);
edac_device_handle_ce(dci, 0, 0, priv->message);
}
if (p->ue_cnt) {
pinf = &p->ueinfo;
snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE,
"\nOCM ECC error type :%s\nAddr: [0x%x]\nFault Data[0x%08x%08x]",
"UE", pinf->addr, pinf->fault_hi, pinf->fault_lo);
edac_device_handle_ue(dci, 0, 0, priv->message);
}
memset(p, 0, sizeof(*p));
}
/**
* intr_handler - ISR routine
* @irq: irq number
* @dev_id: device id pointer
*
* Return: IRQ_NONE if the CE/UE interrupt is not set, IRQ_HANDLED otherwise
*/
static irqreturn_t intr_handler(int irq, void *dev_id)
{
struct edac_device_ctl_info *dci = dev_id;
struct edac_priv *priv = dci->pvt_info;
int regval;
regval = readl(priv->baseaddr + OCM_ISR_OFST);
if (!(regval & (OCM_CEINTR_MASK | OCM_UEINTR_MASK))) {
WARN_ONCE(1, "Unhandled IRQ%d, ISR: 0x%x", irq, regval);
return IRQ_NONE;
}
get_error_info(priv->baseaddr, &priv->stat, regval);
priv->ce_cnt += priv->stat.ce_cnt;
priv->ue_cnt += priv->stat.ue_cnt;
handle_error(dci, &priv->stat);
return IRQ_HANDLED;
}
/**
* get_eccstate - Return the ECC status
* @base: Pointer to the OCM base address
*
* Get the ECC enable/disable status
*
* Return: true if ECC is enabled, false otherwise.
*/
static bool get_eccstate(void __iomem *base)
{
return readl(base + ECC_CTRL_OFST) & OCM_ECC_ENABLE_MASK;
}
#ifdef CONFIG_EDAC_DEBUG
/**
* write_fault_count - write fault injection count
* @priv: Pointer to the EDAC private struct
*
* Update the fault injection count register. Once the counter reaches
* zero, errors are injected.
*/
static void write_fault_count(struct edac_priv *priv)
{
u32 ficount = priv->fault_injection_cnt;
if (ficount & ~OCM_FICOUNT_MASK) {
ficount &= OCM_FICOUNT_MASK;
edac_printk(KERN_INFO, EDAC_DEVICE,
"Fault injection count value truncated to %d\n", ficount);
}
writel(ficount, priv->baseaddr + OCM_FIC_OFST);
}
/*
* To get the Correctable Error injected, the following steps are needed:
* - Setup the optional Fault Injection Count:
* echo <fault_count val> > /sys/kernel/debug/edac/ocm/inject_fault_count
* - Write the Correctable Error bit position value:
* echo <bit_pos val> > /sys/kernel/debug/edac/ocm/inject_ce_bitpos
*/
static ssize_t inject_ce_write(struct file *file, const char __user *data,
size_t count, loff_t *ppos)
{
struct edac_device_ctl_info *edac_dev = file->private_data;
struct edac_priv *priv = edac_dev->pvt_info;
int ret;
if (!data)
return -EFAULT;
ret = kstrtou8_from_user(data, count, 0, &priv->ce_bitpos);
if (ret)
return ret;
if (priv->ce_bitpos > UE_MAX_BITPOS_UPPER)
return -EINVAL;
if (priv->ce_bitpos <= UE_MAX_BITPOS_LOWER) {
writel(BIT(priv->ce_bitpos), priv->baseaddr + OCM_FID0_OFST);
writel(0, priv->baseaddr + OCM_FID1_OFST);
} else {
writel(BIT(priv->ce_bitpos - UE_MIN_BITPOS_UPPER),
priv->baseaddr + OCM_FID1_OFST);
writel(0, priv->baseaddr + OCM_FID0_OFST);
}
write_fault_count(priv);
return count;
}
static const struct file_operations inject_ce_fops = {
.open = simple_open,
.write = inject_ce_write,
.llseek = generic_file_llseek,
};
/*
* To get the Uncorrectable Error injected, the following steps are needed:
* - Setup the optional Fault Injection Count:
* echo <fault_count val> > /sys/kernel/debug/edac/ocm/inject_fault_count
* - Write the Uncorrectable Error bit position values:
* echo <bit_pos0 val>,<bit_pos1 val> > /sys/kernel/debug/edac/ocm/inject_ue_bitpos
*/
static ssize_t inject_ue_write(struct file *file, const char __user *data,
size_t count, loff_t *ppos)
{
struct edac_device_ctl_info *edac_dev = file->private_data;
struct edac_priv *priv = edac_dev->pvt_info;
char buf[6], *pbuf, *token[2];
u64 ue_bitpos;
int i, ret;
u8 len;
if (!data)
return -EFAULT;
len = min_t(size_t, count, sizeof(buf) - 1); /* leave room for the '\0' terminator */
if (copy_from_user(buf, data, len))
return -EFAULT;
buf[len] = '\0';
pbuf = &buf[0];
for (i = 0; i < OCM_NUM_UE_BITPOS; i++)
token[i] = strsep(&pbuf, ",");
ret = kstrtou8(token[0], 0, &priv->ue_bitpos[0]);
if (ret)
return ret;
ret = kstrtou8(token[1], 0, &priv->ue_bitpos[1]);
if (ret)
return ret;
if (priv->ue_bitpos[0] > UE_MAX_BITPOS_UPPER ||
priv->ue_bitpos[1] > UE_MAX_BITPOS_UPPER)
return -EINVAL;
if (priv->ue_bitpos[0] == priv->ue_bitpos[1]) {
edac_printk(KERN_ERR, EDAC_DEVICE, "Bit positions should not be equal\n");
return -EINVAL;
}
ue_bitpos = BIT(priv->ue_bitpos[0]) | BIT(priv->ue_bitpos[1]);
writel((u32)ue_bitpos, priv->baseaddr + OCM_FID0_OFST);
writel((u32)(ue_bitpos >> 32), priv->baseaddr + OCM_FID1_OFST);
write_fault_count(priv);
return count;
}
static const struct file_operations inject_ue_fops = {
.open = simple_open,
.write = inject_ue_write,
.llseek = generic_file_llseek,
};
static void setup_debugfs(struct edac_device_ctl_info *edac_dev)
{
struct edac_priv *priv = edac_dev->pvt_info;
priv->debugfs_dir = edac_debugfs_create_dir("ocm");
if (!priv->debugfs_dir)
return;
edac_debugfs_create_x32("inject_fault_count", 0644, priv->debugfs_dir,
&priv->fault_injection_cnt);
edac_debugfs_create_file("inject_ue_bitpos", 0644, priv->debugfs_dir,
edac_dev, &inject_ue_fops);
edac_debugfs_create_file("inject_ce_bitpos", 0644, priv->debugfs_dir,
edac_dev, &inject_ce_fops);
}
#endif
static int edac_probe(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci;
struct edac_priv *priv;
void __iomem *baseaddr;
struct resource *res;
int irq, ret;
baseaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(baseaddr))
return PTR_ERR(baseaddr);
if (!get_eccstate(baseaddr)) {
edac_printk(KERN_INFO, EDAC_DEVICE, "ECC not enabled\n");
return -ENXIO;
}
dci = edac_device_alloc_ctl_info(sizeof(*priv), ZYNQMP_OCM_EDAC_STRING,
1, ZYNQMP_OCM_EDAC_STRING, 1, 0, NULL, 0,
edac_device_alloc_index());
if (!dci)
return -ENOMEM;
priv = dci->pvt_info;
platform_set_drvdata(pdev, dci);
dci->dev = &pdev->dev;
priv->baseaddr = baseaddr;
dci->mod_name = pdev->dev.driver->name;
dci->ctl_name = ZYNQMP_OCM_EDAC_STRING;
dci->dev_name = dev_name(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto free_dev_ctl;
}
ret = devm_request_irq(&pdev->dev, irq, intr_handler, 0,
dev_name(&pdev->dev), dci);
if (ret) {
edac_printk(KERN_ERR, EDAC_DEVICE, "Failed to request Irq\n");
goto free_dev_ctl;
}
/* Enable UE, CE interrupts */
writel((OCM_CEINTR_MASK | OCM_UEINTR_MASK), priv->baseaddr + OCM_IEN_OFST);
#ifdef CONFIG_EDAC_DEBUG
setup_debugfs(dci);
#endif
ret = edac_device_add_device(dci);
if (ret)
goto free_dev_ctl;
return 0;
free_dev_ctl:
edac_device_free_ctl_info(dci);
return ret;
}
static int edac_remove(struct platform_device *pdev)
{
struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
struct edac_priv *priv = dci->pvt_info;
/* Disable UE, CE interrupts */
writel((OCM_CEINTR_MASK | OCM_UEINTR_MASK), priv->baseaddr + OCM_IDS_OFST);
#ifdef CONFIG_EDAC_DEBUG
debugfs_remove_recursive(priv->debugfs_dir);
#endif
edac_device_del_device(&pdev->dev);
edac_device_free_ctl_info(dci);
return 0;
}
static const struct of_device_id zynqmp_ocm_edac_match[] = {
{ .compatible = "xlnx,zynqmp-ocmc-1.0"},
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, zynqmp_ocm_edac_match);
static struct platform_driver zynqmp_ocm_edac_driver = {
.driver = {
.name = "zynqmp-ocm-edac",
.of_match_table = zynqmp_ocm_edac_match,
},
.probe = edac_probe,
.remove = edac_remove,
};
module_platform_driver(zynqmp_ocm_edac_driver);
MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("Xilinx ZynqMP OCM ECC driver");
MODULE_LICENSE("GPL");