Commit ff8be964 authored by Linus Torvalds

Merge tag 'edac_updates_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras

Pull EDAC updates from Borislav Petkov:

 - Add support for version 3 of the Synopsys DDR controller to
   synopsys_edac

 - Add support for DDR5 and new models 0x10-0x1f and 0x50-0x5f of AMD
   family 0x19 CPUs to amd64_edac

 - The usual set of fixes and cleanups

* tag 'edac_updates_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras:
  EDAC/amd64: Add support for family 19h, models 50h-5fh
  EDAC/sb_edac: Remove redundant initialization of variable rc
  RAS/CEC: Remove a repeated 'an' in a comment
  EDAC/amd64: Add support for AMD Family 19h Models 10h-1Fh and A0h-AFh
  EDAC: Add RDDR5 and LRDDR5 memory types
  EDAC/sifive: Fix non-kernel-doc comment
  dt-bindings: memory: Add entry for version 3.80a
  EDAC/synopsys: Enable the driver on Intel's N5X platform
  EDAC/synopsys: Add support for version 3 of the Synopsys EDAC DDR
  EDAC/synopsys: Use the quirk for version instead of ddr version
...@@ -26,6 +26,7 @@ properties: ...@@ -26,6 +26,7 @@ properties:
enum: enum:
- xlnx,zynq-ddrc-a05 - xlnx,zynq-ddrc-a05
- xlnx,zynqmp-ddrc-2.40a - xlnx,zynqmp-ddrc-2.40a
- snps,ddrc-3.80a
interrupts: interrupts:
maxItems: 1 maxItems: 1
......
...@@ -484,7 +484,7 @@ config EDAC_ARMADA_XP ...@@ -484,7 +484,7 @@ config EDAC_ARMADA_XP
config EDAC_SYNOPSYS config EDAC_SYNOPSYS
tristate "Synopsys DDR Memory Controller" tristate "Synopsys DDR Memory Controller"
depends on ARCH_ZYNQ || ARCH_ZYNQMP depends on ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA
help help
Support for error detection and correction on the Synopsys DDR Support for error detection and correction on the Synopsys DDR
memory controller. memory controller.
......
...@@ -2925,6 +2925,26 @@ static struct amd64_family_type family_types[] = { ...@@ -2925,6 +2925,26 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_addr_mask_to_cs_size, .dbam_to_cs = f17_addr_mask_to_cs_size,
} }
}, },
[F19_M10H_CPUS] = {
.ctl_name = "F19h_M10h",
.f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6,
.max_mcs = 12,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F19_M50H_CPUS] = {
.ctl_name = "F19h_M50h",
.f0_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F0,
.f6_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F6,
.max_mcs = 2,
.ops = {
.early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
}; };
/* /*
...@@ -3962,11 +3982,25 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) ...@@ -3962,11 +3982,25 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
break; break;
case 0x19: case 0x19:
if (pvt->model >= 0x20 && pvt->model <= 0x2f) { if (pvt->model >= 0x10 && pvt->model <= 0x1f) {
fam_type = &family_types[F19_M10H_CPUS];
pvt->ops = &family_types[F19_M10H_CPUS].ops;
break;
} else if (pvt->model >= 0x20 && pvt->model <= 0x2f) {
fam_type = &family_types[F17_M70H_CPUS]; fam_type = &family_types[F17_M70H_CPUS];
pvt->ops = &family_types[F17_M70H_CPUS].ops; pvt->ops = &family_types[F17_M70H_CPUS].ops;
fam_type->ctl_name = "F19h_M20h"; fam_type->ctl_name = "F19h_M20h";
break; break;
} else if (pvt->model >= 0x50 && pvt->model <= 0x5f) {
fam_type = &family_types[F19_M50H_CPUS];
pvt->ops = &family_types[F19_M50H_CPUS].ops;
fam_type->ctl_name = "F19h_M50h";
break;
} else if (pvt->model >= 0xa0 && pvt->model <= 0xaf) {
fam_type = &family_types[F19_M10H_CPUS];
pvt->ops = &family_types[F19_M10H_CPUS].ops;
fam_type->ctl_name = "F19h_MA0h";
break;
} }
fam_type = &family_types[F19_CPUS]; fam_type = &family_types[F19_CPUS];
pvt->ops = &family_types[F19_CPUS].ops; pvt->ops = &family_types[F19_CPUS].ops;
......
...@@ -96,7 +96,7 @@ ...@@ -96,7 +96,7 @@
/* Hardware limit on ChipSelect rows per MC and processors per system */ /* Hardware limit on ChipSelect rows per MC and processors per system */
#define NUM_CHIPSELECTS 8 #define NUM_CHIPSELECTS 8
#define DRAM_RANGES 8 #define DRAM_RANGES 8
#define NUM_CONTROLLERS 8 #define NUM_CONTROLLERS 12
#define ON true #define ON true
#define OFF false #define OFF false
...@@ -126,6 +126,10 @@ ...@@ -126,6 +126,10 @@
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650 #define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650
#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656 #define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F0 0x14ad
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F6 0x14b3
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F0 0x166a
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F6 0x1670
/* /*
* Function 1 - Address Map * Function 1 - Address Map
...@@ -298,6 +302,8 @@ enum amd_families { ...@@ -298,6 +302,8 @@ enum amd_families {
F17_M60H_CPUS, F17_M60H_CPUS,
F17_M70H_CPUS, F17_M70H_CPUS,
F19_CPUS, F19_CPUS,
F19_M10H_CPUS,
F19_M50H_CPUS,
NUM_FAMILIES, NUM_FAMILIES,
}; };
......
...@@ -162,6 +162,8 @@ const char * const edac_mem_types[] = { ...@@ -162,6 +162,8 @@ const char * const edac_mem_types[] = {
[MEM_LPDDR4] = "Low-Power-DDR4-RAM", [MEM_LPDDR4] = "Low-Power-DDR4-RAM",
[MEM_LRDDR4] = "Load-Reduced-DDR4-RAM", [MEM_LRDDR4] = "Load-Reduced-DDR4-RAM",
[MEM_DDR5] = "Unbuffered-DDR5", [MEM_DDR5] = "Unbuffered-DDR5",
[MEM_RDDR5] = "Registered-DDR5",
[MEM_LRDDR5] = "Load-Reduced-DDR5-RAM",
[MEM_NVDIMM] = "Non-volatile-RAM", [MEM_NVDIMM] = "Non-volatile-RAM",
[MEM_WIO2] = "Wide-IO-2", [MEM_WIO2] = "Wide-IO-2",
[MEM_HBM2] = "High-bandwidth-memory-Gen2", [MEM_HBM2] = "High-bandwidth-memory-Gen2",
......
...@@ -3439,7 +3439,7 @@ MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids); ...@@ -3439,7 +3439,7 @@ MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
static int sbridge_probe(const struct x86_cpu_id *id) static int sbridge_probe(const struct x86_cpu_id *id)
{ {
int rc = -ENODEV; int rc;
u8 mc, num_mc = 0; u8 mc, num_mc = 0;
struct sbridge_dev *sbridge_dev; struct sbridge_dev *sbridge_dev;
struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data; struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
......
...@@ -19,7 +19,7 @@ struct sifive_edac_priv { ...@@ -19,7 +19,7 @@ struct sifive_edac_priv {
struct edac_device_ctl_info *dci; struct edac_device_ctl_info *dci;
}; };
/** /*
* EDAC error callback * EDAC error callback
* *
* @event: non-zero if unrecoverable. * @event: non-zero if unrecoverable.
......
...@@ -101,6 +101,7 @@ ...@@ -101,6 +101,7 @@
/* DDR ECC Quirks */ /* DDR ECC Quirks */
#define DDR_ECC_INTR_SUPPORT BIT(0) #define DDR_ECC_INTR_SUPPORT BIT(0)
#define DDR_ECC_DATA_POISON_SUPPORT BIT(1) #define DDR_ECC_DATA_POISON_SUPPORT BIT(1)
#define DDR_ECC_INTR_SELF_CLEAR BIT(2)
/* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */ /* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
/* ECC Configuration Registers */ /* ECC Configuration Registers */
...@@ -171,6 +172,10 @@ ...@@ -171,6 +172,10 @@
#define DDR_QOS_IRQ_EN_OFST 0x20208 #define DDR_QOS_IRQ_EN_OFST 0x20208
#define DDR_QOS_IRQ_DB_OFST 0x2020C #define DDR_QOS_IRQ_DB_OFST 0x2020C
/* DDR QOS Interrupt register definitions */
#define DDR_UE_MASK BIT(9)
#define DDR_CE_MASK BIT(8)
/* ECC Corrected Error Register Mask and Shifts*/ /* ECC Corrected Error Register Mask and Shifts*/
#define ECC_CEADDR0_RW_MASK 0x3FFFF #define ECC_CEADDR0_RW_MASK 0x3FFFF
#define ECC_CEADDR0_RNK_MASK BIT(24) #define ECC_CEADDR0_RNK_MASK BIT(24)
...@@ -533,10 +538,16 @@ static irqreturn_t intr_handler(int irq, void *dev_id) ...@@ -533,10 +538,16 @@ static irqreturn_t intr_handler(int irq, void *dev_id)
priv = mci->pvt_info; priv = mci->pvt_info;
p_data = priv->p_data; p_data = priv->p_data;
regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); /*
regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK); * v3.0 of the controller has the ce/ue bits cleared automatically,
if (!(regval & ECC_CE_UE_INTR_MASK)) * so this condition does not apply.
return IRQ_NONE; */
if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
if (!(regval & ECC_CE_UE_INTR_MASK))
return IRQ_NONE;
}
status = p_data->get_error_info(priv); status = p_data->get_error_info(priv);
if (status) if (status)
...@@ -548,7 +559,9 @@ static irqreturn_t intr_handler(int irq, void *dev_id) ...@@ -548,7 +559,9 @@ static irqreturn_t intr_handler(int irq, void *dev_id)
edac_dbg(3, "Total error count CE %d UE %d\n", edac_dbg(3, "Total error count CE %d UE %d\n",
priv->ce_cnt, priv->ue_cnt); priv->ce_cnt, priv->ue_cnt);
writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST); /* v3.0 of the controller does not have this register */
if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -834,8 +847,13 @@ static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev) ...@@ -834,8 +847,13 @@ static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
static void enable_intr(struct synps_edac_priv *priv) static void enable_intr(struct synps_edac_priv *priv)
{ {
/* Enable UE/CE Interrupts */ /* Enable UE/CE Interrupts */
writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK, if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
priv->baseaddr + DDR_QOS_IRQ_EN_OFST); writel(DDR_UE_MASK | DDR_CE_MASK,
priv->baseaddr + ECC_CLR_OFST);
else
writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
} }
static void disable_intr(struct synps_edac_priv *priv) static void disable_intr(struct synps_edac_priv *priv)
...@@ -890,6 +908,19 @@ static const struct synps_platform_data zynqmp_edac_def = { ...@@ -890,6 +908,19 @@ static const struct synps_platform_data zynqmp_edac_def = {
), ),
}; };
static const struct synps_platform_data synopsys_edac_def = {
.get_error_info = zynqmp_get_error_info,
.get_mtype = zynqmp_get_mtype,
.get_dtype = zynqmp_get_dtype,
.get_ecc_state = zynqmp_get_ecc_state,
.quirks = (DDR_ECC_INTR_SUPPORT | DDR_ECC_INTR_SELF_CLEAR
#ifdef CONFIG_EDAC_DEBUG
| DDR_ECC_DATA_POISON_SUPPORT
#endif
),
};
static const struct of_device_id synps_edac_match[] = { static const struct of_device_id synps_edac_match[] = {
{ {
.compatible = "xlnx,zynq-ddrc-a05", .compatible = "xlnx,zynq-ddrc-a05",
...@@ -899,6 +930,10 @@ static const struct of_device_id synps_edac_match[] = { ...@@ -899,6 +930,10 @@ static const struct of_device_id synps_edac_match[] = {
.compatible = "xlnx,zynqmp-ddrc-2.40a", .compatible = "xlnx,zynqmp-ddrc-2.40a",
.data = (void *)&zynqmp_edac_def .data = (void *)&zynqmp_edac_def
}, },
{
.compatible = "snps,ddrc-3.80a",
.data = (void *)&synopsys_edac_def
},
{ {
/* end of table */ /* end of table */
} }
...@@ -1352,8 +1387,7 @@ static int mc_probe(struct platform_device *pdev) ...@@ -1352,8 +1387,7 @@ static int mc_probe(struct platform_device *pdev)
} }
} }
if (of_device_is_compatible(pdev->dev.of_node, if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
"xlnx,zynqmp-ddrc-2.40a"))
setup_address_map(priv); setup_address_map(priv);
#endif #endif
......
...@@ -38,7 +38,7 @@ ...@@ -38,7 +38,7 @@
* elements entered into the array, during which, we're decaying all elements. * elements entered into the array, during which, we're decaying all elements.
* If, after decay, an element gets inserted again, its generation is set to 11b * If, after decay, an element gets inserted again, its generation is set to 11b
* to make sure it has higher numerical count than other, older elements and * to make sure it has higher numerical count than other, older elements and
* thus emulate an an LRU-like behavior when deleting elements to free up space * thus emulate an LRU-like behavior when deleting elements to free up space
* in the page. * in the page.
* *
* When an element reaches it's max count of action_threshold, we try to poison * When an element reaches it's max count of action_threshold, we try to poison
......
...@@ -182,6 +182,8 @@ static inline char *mc_event_error_type(const unsigned int err_type) ...@@ -182,6 +182,8 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_LRDDR4: Load-Reduced DDR4 memory. * @MEM_LRDDR4: Load-Reduced DDR4 memory.
* @MEM_LPDDR4: Low-Power DDR4 memory. * @MEM_LPDDR4: Low-Power DDR4 memory.
* @MEM_DDR5: Unbuffered DDR5 RAM * @MEM_DDR5: Unbuffered DDR5 RAM
* @MEM_RDDR5: Registered DDR5 RAM
* @MEM_LRDDR5: Load-Reduced DDR5 memory.
* @MEM_NVDIMM: Non-volatile RAM * @MEM_NVDIMM: Non-volatile RAM
* @MEM_WIO2: Wide I/O 2. * @MEM_WIO2: Wide I/O 2.
* @MEM_HBM2: High bandwidth Memory Gen 2. * @MEM_HBM2: High bandwidth Memory Gen 2.
...@@ -211,6 +213,8 @@ enum mem_type { ...@@ -211,6 +213,8 @@ enum mem_type {
MEM_LRDDR4, MEM_LRDDR4,
MEM_LPDDR4, MEM_LPDDR4,
MEM_DDR5, MEM_DDR5,
MEM_RDDR5,
MEM_LRDDR5,
MEM_NVDIMM, MEM_NVDIMM,
MEM_WIO2, MEM_WIO2,
MEM_HBM2, MEM_HBM2,
...@@ -239,6 +243,8 @@ enum mem_type { ...@@ -239,6 +243,8 @@ enum mem_type {
#define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4) #define MEM_FLAG_LRDDR4 BIT(MEM_LRDDR4)
#define MEM_FLAG_LPDDR4 BIT(MEM_LPDDR4) #define MEM_FLAG_LPDDR4 BIT(MEM_LPDDR4)
#define MEM_FLAG_DDR5 BIT(MEM_DDR5) #define MEM_FLAG_DDR5 BIT(MEM_DDR5)
#define MEM_FLAG_RDDR5 BIT(MEM_RDDR5)
#define MEM_FLAG_LRDDR5 BIT(MEM_LRDDR5)
#define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM) #define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM)
#define MEM_FLAG_WIO2 BIT(MEM_WIO2) #define MEM_FLAG_WIO2 BIT(MEM_WIO2)
#define MEM_FLAG_HBM2 BIT(MEM_HBM2) #define MEM_FLAG_HBM2 BIT(MEM_HBM2)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册