Commit c29f5ec0 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp: (26 commits)
  amd64_edac: add MAINTAINERS entry
  EDAC: do not enable modules by default
  amd64_edac: do not enable module by default
  amd64_edac: add module registration routines
  amd64_edac: add ECC reporting initializers
  amd64_edac: add EDAC core-related initializers
  amd64_edac: add error decoding logic
  amd64_edac: add ECC chipkill syndrome mapping table
  amd64_edac: add per-family descriptors
  amd64_edac: add F10h-and-later methods-p3
  amd64_edac: add F10h-and-later methods-p2
  amd64_edac: add F10h-and-later methods-p1
  amd64_edac: add k8-specific methods
  amd64_edac: assign DRAM chip select base and mask in a family-specific way
  amd64_edac: add helper to dump relevant registers
  amd64_edac: add DRAM address type conversion facilities
  amd64_edac: add functionality to compute the DRAM hole
  amd64_edac: add sys addr to memory controller mapping helpers
  amd64_edac: add memory scrubber interface
  amd64_edac: add MCA error types
  ...
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -1979,6 +1979,16 @@ F: Documentation/edac.txt
 F: drivers/edac/edac_*
 F: include/linux/edac.h

+EDAC-AMD64
+P: Doug Thompson
+M: dougthompson@xmission.com
+P: Borislav Petkov
+M: borislav.petkov@amd.com
+L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
+W: bluesmoke.sourceforge.net
+S: Supported
+F: drivers/edac/amd64_edac*
+
 EDAC-E752X
 P: Mark Gross
 M: mark.gross@intel.com
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
@@ -12,6 +12,17 @@
 #include <asm/asm.h>
 #include <asm/errno.h>
+#include <asm/cpumask.h>
+
+struct msr {
+	union {
+		struct {
+			u32 l;
+			u32 h;
+		};
+		u64 q;
+	};
+};

 static inline unsigned long long native_read_tscp(unsigned int *aux)
 {
@@ -216,6 +227,8 @@ do { \
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
+void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 #else  /* CONFIG_SMP */
@@ -229,6 +242,16 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 	wrmsr(msr_no, l, h);
 	return 0;
 }
+static inline void rdmsr_on_cpus(const cpumask_t *m, u32 msr_no,
+				struct msr *msrs)
+{
+	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
+}
+static inline void wrmsr_on_cpus(const cpumask_t *m, u32 msr_no,
+				struct msr *msrs)
+{
+	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
+}
 static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
 				    u32 *l, u32 *h)
 {
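The new struct msr gives callers two views of one MSR: the l/h pair matches the rdmsr/wrmsr register split, while q is the same 64 bits taken as one value. A minimal usage sketch of the batch interface added above follows; the MSR number (MC4_CTL, 0x0410) and the surrounding function are illustrative, not part of this commit:

/*
 * Illustrative sketch only: read MC4_CTL (MSR 0x0410) on all online CPUs
 * via rdmsr_on_cpus(). Note that the results array is indexed relative to
 * the first CPU in the mask (rv.off in the implementation), so with CPU0
 * online, msrs[cpu] lines up per-CPU.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <asm/msr.h>

static void example_dump_mc4ctl(void)
{
	struct msr *msrs;
	int cpu;

	msrs = kcalloc(nr_cpu_ids, sizeof(*msrs), GFP_KERNEL);
	if (!msrs)
		return;

	rdmsr_on_cpus(&cpu_online_map, 0x0410, msrs);

	for_each_online_cpu(cpu) {
		/* .q is the 64-bit view; .l/.h are the 32-bit halves */
		printk(KERN_INFO "CPU%d: MC4_CTL=0x%016llx\n",
		       cpu, (unsigned long long)msrs[cpu].q);
	}

	kfree(msrs);
}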
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for x86 specific library files.
 #

-obj-$(CONFIG_SMP) := msr-on-cpu.o
+obj-$(CONFIG_SMP) := msr.o

 lib-y := delay.o
 lib-y += thunk_$(BITS).o
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr.c (file renamed)
@@ -5,22 +5,38 @@
 struct msr_info {
 	u32 msr_no;
-	u32 l, h;
+	struct msr reg;
+	struct msr *msrs;
+	int off;
 	int err;
 };

 static void __rdmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
+	struct msr *reg;
+	int this_cpu = raw_smp_processor_id();
+
+	if (rv->msrs)
+		reg = &rv->msrs[this_cpu - rv->off];
+	else
+		reg = &rv->reg;

-	rdmsr(rv->msr_no, rv->l, rv->h);
+	rdmsr(rv->msr_no, reg->l, reg->h);
 }

 static void __wrmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
+	struct msr *reg;
+	int this_cpu = raw_smp_processor_id();
+
+	if (rv->msrs)
+		reg = &rv->msrs[this_cpu - rv->off];
+	else
+		reg = &rv->reg;

-	wrmsr(rv->msr_no, rv->l, rv->h);
+	wrmsr(rv->msr_no, reg->l, reg->h);
 }

 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
@@ -28,26 +44,95 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 	int err;
 	struct msr_info rv;

+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
 	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
-	*l = rv.l;
-	*h = rv.h;
+	*l = rv.reg.l;
+	*h = rv.reg.h;

 	return err;
 }
+EXPORT_SYMBOL(rdmsr_on_cpu);

 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	int err;
 	struct msr_info rv;

+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
-	rv.l = l;
-	rv.h = h;
+	rv.reg.l = l;
+	rv.reg.h = h;
 	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

 	return err;
 }
+EXPORT_SYMBOL(wrmsr_on_cpu);
+
+/* rdmsr on a bunch of CPUs
+ *
+ * @mask:	which CPUs
+ * @msr_no:	which MSR
+ * @msrs:	array of MSR values
+ *
+ */
+void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
+{
+	struct msr_info rv;
+	int this_cpu;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.off    = cpumask_first(mask);
+	rv.msrs   = msrs;
+	rv.msr_no = msr_no;
+
+	preempt_disable();
+	/*
+	 * FIXME: handle the CPU we're executing on separately for now until
+	 * smp_call_function_many has been fixed to not skip it.
+	 */
+	this_cpu = raw_smp_processor_id();
+	smp_call_function_single(this_cpu, __rdmsr_on_cpu, &rv, 1);
+
+	smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1);
+	preempt_enable();
+}
+EXPORT_SYMBOL(rdmsr_on_cpus);
+
+/*
+ * wrmsr on a bunch of CPUs
+ *
+ * @mask:	which CPUs
+ * @msr_no:	which MSR
+ * @msrs:	array of MSR values
+ *
+ */
+void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs)
+{
+	struct msr_info rv;
+	int this_cpu;
+
+	memset(&rv, 0, sizeof(rv));
+
+	rv.off    = cpumask_first(mask);
+	rv.msrs   = msrs;
+	rv.msr_no = msr_no;
+
+	preempt_disable();
+	/*
+	 * FIXME: handle the CPU we're executing on separately for now until
+	 * smp_call_function_many has been fixed to not skip it.
+	 */
+	this_cpu = raw_smp_processor_id();
+	smp_call_function_single(this_cpu, __wrmsr_on_cpu, &rv, 1);
+
+	smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1);
+	preempt_enable();
+}
+EXPORT_SYMBOL(wrmsr_on_cpus);

 /* These "safe" variants are slower and should be used when the target MSR
    may not actually exist. */
@@ -55,14 +140,14 @@ static void __rdmsr_safe_on_cpu(void *info)
 {
 	struct msr_info *rv = info;

-	rv->err = rdmsr_safe(rv->msr_no, &rv->l, &rv->h);
+	rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h);
 }

 static void __wrmsr_safe_on_cpu(void *info)
 {
 	struct msr_info *rv = info;

-	rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h);
+	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
 }

 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
@@ -70,28 +155,29 @@ int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 	int err;
 	struct msr_info rv;

+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
 	err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
-	*l = rv.l;
-	*h = rv.h;
+	*l = rv.reg.l;
+	*h = rv.reg.h;

 	return err ? err : rv.err;
 }
+EXPORT_SYMBOL(rdmsr_safe_on_cpu);

 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	int err;
 	struct msr_info rv;

+	memset(&rv, 0, sizeof(rv));
+
 	rv.msr_no = msr_no;
-	rv.l = l;
-	rv.h = h;
+	rv.reg.l = l;
+	rv.reg.h = h;
 	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

 	return err ? err : rv.err;
 }
-
-EXPORT_SYMBOL(rdmsr_on_cpu);
-EXPORT_SYMBOL(wrmsr_on_cpu);
-EXPORT_SYMBOL(rdmsr_safe_on_cpu);
 EXPORT_SYMBOL(wrmsr_safe_on_cpu);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
@@ -49,7 +49,6 @@ config EDAC_DEBUG_VERBOSE

 config EDAC_MM_EDAC
 	tristate "Main Memory EDAC (Error Detection And Correction) reporting"
-	default y
 	help
 	  Some systems are able to detect and correct errors in main
 	  memory. EDAC can report statistics on memory error
@@ -58,6 +57,31 @@ config EDAC_MM_EDAC
 	  occurred so that a particular failing memory module can be
 	  replaced. If unsure, select 'Y'.

+config EDAC_AMD64
+	tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
+	depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI
+	help
+	  Support for error detection and correction on the AMD 64
+	  Families of Memory Controllers (K8, F10h and F11h)
+
+config EDAC_AMD64_ERROR_INJECTION
+	bool "Sysfs Error Injection facilities"
+	depends on EDAC_AMD64
+	help
+	  Recent Opterons (Family 10h and later) provide for Memory Error
+	  Injection into the ECC detection circuits. The amd64_edac module
+	  allows the operator/user to inject Uncorrectable and Correctable
+	  errors into DRAM.
+
+	  When enabled, in each of the respective memory controller directories
+	  (/sys/devices/system/edac/mc/mcX), there are 3 input files:
+
+	  - inject_section (0..3, 16-byte section of 64-byte cacheline),
+	  - inject_word (0..8, 16-bit word of 16-byte section),
+	  - inject_ecc_vector (hex ecc vector: select bits of inject word)
+
+	  In addition, there are two control files, inject_read and inject_write,
+	  which trigger the DRAM ECC Read and Write respectively.
+
 config EDAC_AMD76X
 	tristate "AMD 76x (760, 762, 768)"
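A hedged userspace sketch of driving the injection files the help text describes; the mc0 path follows the help text, but the staged values are arbitrary examples:

/*
 * Illustrative only: stage section/word/ecc_vector and trigger a DRAM
 * ECC read injection through the sysfs files named in the help text.
 */
#include <stdio.h>

static int write_attr(const char *mc, const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", mc, attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	const char *mc = "/sys/devices/system/edac/mc/mc0";

	write_attr(mc, "inject_section", "2");    /* 16-byte section 2 of the cacheline */
	write_attr(mc, "inject_word", "4");       /* 16-bit word 4 of that section      */
	write_attr(mc, "inject_ecc_vector", "1"); /* hex vector: flip bit 0 of the word */
	write_attr(mc, "inject_read", "1");       /* fire the DRAM ECC read             */
	return 0;
}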
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
@@ -30,6 +30,13 @@ obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
 obj-$(CONFIG_EDAC_X38) += x38_edac.o
 obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
 obj-$(CONFIG_EDAC_R82600) += r82600_edac.o

+amd64_edac_mod-y := amd64_edac_err_types.o amd64_edac.o
+amd64_edac_mod-$(CONFIG_EDAC_DEBUG) += amd64_edac_dbg.o
+amd64_edac_mod-$(CONFIG_EDAC_AMD64_ERROR_INJECTION) += amd64_edac_inj.o
+
+obj-$(CONFIG_EDAC_AMD64) += amd64_edac_mod.o
+
 obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
 obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac.o
 obj-$(CONFIG_EDAC_MV64X60) += mv64x60_edac.o
(Diff collapsed by the viewer: the one large new file not shown here, presumably drivers/edac/amd64_edac.c per the Makefile above.)

new file: drivers/edac/amd64_edac.h
/*
* AMD64 class Memory Controller kernel module
*
* Copyright (c) 2009 SoftwareBitMaker.
* Copyright (c) 2009 Advanced Micro Devices, Inc.
*
* This file may be distributed under the terms of the
* GNU General Public License.
*
* Originally Written by Thayne Harbaugh
*
* Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
* - K8 CPU Revision D and greater support
*
* Changes by Dave Peterson <dsp@llnl.gov> <dave_peterson@pobox.com>:
* - Module largely rewritten, with new (and hopefully correct)
* code for dealing with node and chip select interleaving,
* various code cleanup, and bug fixes
* - Added support for memory hoisting using DRAM hole address
* register
*
* Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
* -K8 Rev (1207) revision support added, required Revision
* specific mini-driver code to support Rev F as well as
* prior revisions
*
* Changes by Douglas "norsk" Thompson <dougthompson@xmission.com>:
* -Family 10h revision support added. New PCI Device IDs,
* indicating new changes. Actual registers modified
* were slight, less than the Rev E to Rev F transition
* but changing the PCI Device ID was the proper thing to
* do, as it provides for almost automatic family
* detection. The mods to Rev F required more family
* information detection.
*
* Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
* - misc fixes and code cleanups
*
* This module is based on the following documents
* (available from http://www.amd.com/):
*
* Title: BIOS and Kernel Developer's Guide for AMD Athlon 64 and AMD
* Opteron Processors
* AMD publication #: 26094
* Revision: 3.26
*
* Title: BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh
* Processors
* AMD publication #: 32559
* Revision: 3.00
* Issue Date: May 2006
*
* Title: BIOS and Kernel Developer's Guide (BKDG) For AMD Family 10h
* Processors
* AMD publication #: 31116
* Revision: 3.00
* Issue Date: September 07, 2007
*
* Sections in the first 2 documents are no longer in sync with each other.
* The Family 10h BKDG was totally re-written from scratch with a new
* presentation model.
* Therefore, comments that refer to a Document section might be off.
*/
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/edac.h>
#include <asm/msr.h>
#include "edac_core.h"
#define amd64_printk(level, fmt, arg...) \
edac_printk(level, "amd64", fmt, ##arg)
#define amd64_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "amd64", fmt, ##arg)
/*
* Throughout the comments in this code, the following terms are used:
*
* SysAddr, DramAddr, and InputAddr
*
* These terms come directly from the amd64 documentation
* (AMD publication #26094). They are defined as follows:
*
* SysAddr:
* This is a physical address generated by a CPU core or a device
* doing DMA. If generated by a CPU core, a SysAddr is the result of
* a virtual to physical address translation by the CPU core's address
* translation mechanism (MMU).
*
* DramAddr:
* A DramAddr is derived from a SysAddr by subtracting an offset that
* depends on which node the SysAddr maps to and whether the SysAddr
* is within a range affected by memory hoisting. The DRAM Base
* (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers
* determine which node a SysAddr maps to.
*
* If the DRAM Hole Address Register (DHAR) is enabled and the SysAddr
* is within the range of addresses specified by this register, then
* a value x from the DHAR is subtracted from the SysAddr to produce a
* DramAddr. Here, x represents the base address for the node that
* the SysAddr maps to plus an offset due to memory hoisting. See
* section 3.4.8 and the comments in amd64_get_dram_hole_info() and
* sys_addr_to_dram_addr() below for more information.
*
* If the SysAddr is not affected by the DHAR then a value y is
* subtracted from the SysAddr to produce a DramAddr. Here, y is the
* base address for the node that the SysAddr maps to. See section
* 3.4.4 and the comments in sys_addr_to_dram_addr() below for more
* information.
*
* InputAddr:
* A DramAddr is translated to an InputAddr before being passed to the
* memory controller for the node that the DramAddr is associated
* with. The memory controller then maps the InputAddr to a csrow.
* If node interleaving is not in use, then the InputAddr has the same
* value as the DramAddr. Otherwise, the InputAddr is produced by
* discarding the bits used for node interleaving from the DramAddr.
* See section 3.4.4 for more information.
*
* The memory controller for a given node uses its DRAM CS Base and
* DRAM CS Mask registers to map an InputAddr to a csrow. See
* sections 3.5.4 and 3.5.5 for more information.
*/
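A compact sketch of the SysAddr -> DramAddr -> InputAddr chain described above, with fabricated register-derived values; the real driver reads the bases, limits and DHAR from F1x40/F1x44/F1xF0 and does this in sys_addr_to_dram_addr() and friends in amd64_edac.c:

/*
 * Illustrative only; all values are fabricated.
 */
#include <linux/types.h>

static u64 example_sys_to_input_addr(u64 sys_addr)
{
	u64 dram_base   = 0x100000000ULL; /* this node's DRAM Base          */
	u64 hole_off    = 0;              /* nonzero only if DHAR hoisting
	                                     applies to sys_addr            */
	int intlv_shift = 0;              /* # of node-interleave bits      */
	u64 dram_addr, input_addr;

	/* DramAddr: strip the node base (and hoisting offset, if any) */
	dram_addr = sys_addr - dram_base - hole_off;

	/*
	 * InputAddr: squeeze the node-interleave bits (just above bit 11)
	 * out of the DramAddr; with no interleaving, InputAddr == DramAddr.
	 */
	input_addr = ((dram_addr >> intlv_shift) & ~0xfffULL) |
		     (dram_addr & 0xfffULL);

	return input_addr;
}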
#define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__
#define EDAC_MOD_STR "amd64_edac"
/* Extended Model from CPUID, for CPU Revision numbers */
#define OPTERON_CPU_LE_REV_C 0
#define OPTERON_CPU_REV_D 1
#define OPTERON_CPU_REV_E 2
/* NPT processors have the following Extended Models */
#define OPTERON_CPU_REV_F 4
#define OPTERON_CPU_REV_FA 5
/* Hardware limit on ChipSelect rows per MC and processors per system */
#define CHIPSELECT_COUNT 8
#define DRAM_REG_COUNT 8
/*
* PCI-defined configuration space registers
*/
/*
* Function 1 - Address Map
*/
#define K8_DRAM_BASE_LOW 0x40
#define K8_DRAM_LIMIT_LOW 0x44
#define K8_DHAR 0xf0
#define DHAR_VALID BIT(0)
#define F10_DRAM_MEM_HOIST_VALID BIT(1)
#define DHAR_BASE_MASK 0xff000000
#define dhar_base(dhar) (dhar & DHAR_BASE_MASK)
#define K8_DHAR_OFFSET_MASK 0x0000ff00
#define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16)
#define F10_DHAR_OFFSET_MASK 0x0000ff80
/* NOTE: Extra mask bit vs K8 */
#define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16)
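A worked example with a made-up register value:

/*
 * Worked example (K8): DHAR = 0x00004001 has DHAR_VALID set and offset
 * bits [15:8] = 0x40, so k8_dhar_offset() yields 0x4000 << 16 =
 * 0x40000000, i.e. a 1 GB hoisting offset.
 */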
/* F10 High BASE/LIMIT registers */
#define F10_DRAM_BASE_HIGH 0x140
#define F10_DRAM_LIMIT_HIGH 0x144
/*
* Function 2 - DRAM controller
*/
#define K8_DCSB0 0x40
#define F10_DCSB1 0x140
#define K8_DCSB_CS_ENABLE BIT(0)
#define K8_DCSB_NPT_SPARE BIT(1)
#define K8_DCSB_NPT_TESTFAIL BIT(2)
/*
* REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
* the address
*/
#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
#define REV_E_DCS_SHIFT 4
#define REV_E_DCSM_COUNT 8
#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
#define REV_F_F1Xh_DCS_SHIFT 8
/*
* REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
* to form the address
*/
#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
#define REV_F_DCS_SHIFT 8
#define REV_F_DCSM_COUNT 4
#define F10_DCSM_COUNT 4
#define F11_DCSM_COUNT 2
/* DRAM CS Mask Registers */
#define K8_DCSM0 0x60
#define F10_DCSM1 0x160
/* REV E: select [29:21] and [15:9] from DCSM */
#define REV_E_DCSM_MASK_BITS 0x3FE0FE00
/* unused bits [24:20] and [12:0] */
#define REV_E_DCS_NOTUSED_BITS 0x01F01FFF
/* REV F and later: select [28:19] and [13:5] from DCSM */
#define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0
/* unused bits [26:22] and [12:0] */
#define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF
#define DBAM0 0x80
#define DBAM1 0x180
/* Extract the DIMM 'type' on the i'th DIMM from the DBAM reg value passed */
#define DBAM_DIMM(i, reg) ((((reg) >> (4*i))) & 0xF)
#define DBAM_MAX_VALUE 11
#define F10_DCLR_0 0x90
#define F10_DCLR_1 0x190
#define REVE_WIDTH_128 BIT(16)
#define F10_WIDTH_128 BIT(11)
#define F10_DCHR_0 0x94
#define F10_DCHR_1 0x194
#define F10_DCHR_FOUR_RANK_DIMM BIT(18)
#define F10_DCHR_Ddr3Mode BIT(8)
#define F10_DCHR_MblMode BIT(6)
#define F10_DCTL_SEL_LOW 0x110
#define dct_sel_baseaddr(pvt) \
((pvt->dram_ctl_select_low) & 0xFFFFF800)
#define dct_sel_interleave_addr(pvt) \
(((pvt->dram_ctl_select_low) >> 6) & 0x3)
enum {
F10_DCTL_SEL_LOW_DctSelHiRngEn = BIT(0),
F10_DCTL_SEL_LOW_DctSelIntLvEn = BIT(2),
F10_DCTL_SEL_LOW_DctGangEn = BIT(4),
F10_DCTL_SEL_LOW_DctDatIntLv = BIT(5),
F10_DCTL_SEL_LOW_DramEnable = BIT(8),
F10_DCTL_SEL_LOW_MemCleared = BIT(10),
};
#define dct_high_range_enabled(pvt) \
(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelHiRngEn)
#define dct_interleave_enabled(pvt) \
(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctSelIntLvEn)
#define dct_ganging_enabled(pvt) \
(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctGangEn)
#define dct_data_intlv_enabled(pvt) \
(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DctDatIntLv)
#define dct_dram_enabled(pvt) \
(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_DramEnable)
#define dct_memory_cleared(pvt) \
(pvt->dram_ctl_select_low & F10_DCTL_SEL_LOW_MemCleared)
#define F10_DCTL_SEL_HIGH 0x114
/*
* Function 3 - Misc Control
*/
#define K8_NBCTL 0x40
/* Correctable ECC error reporting enable */
#define K8_NBCTL_CECCEn BIT(0)
/* UnCorrectable ECC error reporting enable */
#define K8_NBCTL_UECCEn BIT(1)
#define K8_NBCFG 0x44
#define K8_NBCFG_CHIPKILL BIT(23)
#define K8_NBCFG_ECC_ENABLE BIT(22)
#define K8_NBSL 0x48
#define EXTRACT_HIGH_SYNDROME(x) (((x) >> 24) & 0xff)
#define EXTRACT_EXT_ERROR_CODE(x) (((x) >> 16) & 0x1f)
/* Family F10h: Normalized Extended Error Codes */
#define F10_NBSL_EXT_ERR_RES 0x0
#define F10_NBSL_EXT_ERR_CRC 0x1
#define F10_NBSL_EXT_ERR_SYNC 0x2
#define F10_NBSL_EXT_ERR_MST 0x3
#define F10_NBSL_EXT_ERR_TGT 0x4
#define F10_NBSL_EXT_ERR_GART 0x5
#define F10_NBSL_EXT_ERR_RMW 0x6
#define F10_NBSL_EXT_ERR_WDT 0x7
#define F10_NBSL_EXT_ERR_ECC 0x8
#define F10_NBSL_EXT_ERR_DEV 0x9
#define F10_NBSL_EXT_ERR_LINK_DATA 0xA
/* Next two are overloaded values */
#define F10_NBSL_EXT_ERR_LINK_PROTO 0xB
#define F10_NBSL_EXT_ERR_L3_PROTO 0xB
#define F10_NBSL_EXT_ERR_NB_ARRAY 0xC
#define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD
#define F10_NBSL_EXT_ERR_LINK_RETRY 0xE
/* Next two are overloaded values */
#define F10_NBSL_EXT_ERR_GART_WALK 0xF
#define F10_NBSL_EXT_ERR_DEV_WALK 0xF
/* 0x10 to 0x1B: Reserved */
#define F10_NBSL_EXT_ERR_L3_DATA 0x1C
#define F10_NBSL_EXT_ERR_L3_TAG 0x1D
#define F10_NBSL_EXT_ERR_L3_LRU 0x1E
/* K8: Normalized Extended Error Codes */
#define K8_NBSL_EXT_ERR_ECC 0x0
#define K8_NBSL_EXT_ERR_CRC 0x1
#define K8_NBSL_EXT_ERR_SYNC 0x2
#define K8_NBSL_EXT_ERR_MST 0x3
#define K8_NBSL_EXT_ERR_TGT 0x4
#define K8_NBSL_EXT_ERR_GART 0x5
#define K8_NBSL_EXT_ERR_RMW 0x6
#define K8_NBSL_EXT_ERR_WDT 0x7
#define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8
#define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD
#define EXTRACT_ERROR_CODE(x) ((x) & 0xffff)
#define TEST_TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010)
#define TEST_MEM_ERROR(x) (((x) & 0xFF00) == 0x0100)
#define TEST_BUS_ERROR(x) (((x) & 0xF800) == 0x0800)
#define EXTRACT_TT_CODE(x) (((x) >> 2) & 0x3)
#define EXTRACT_II_CODE(x) (((x) >> 2) & 0x3)
#define EXTRACT_LL_CODE(x) (((x) >> 0) & 0x3)
#define EXTRACT_RRRR_CODE(x) (((x) >> 4) & 0xf)
#define EXTRACT_TO_CODE(x) (((x) >> 8) & 0x1)
#define EXTRACT_PP_CODE(x) (((x) >> 9) & 0x3)
/*
* The following are for BUS type errors AFTER values have been normalized by
* shifting right
*/
#define K8_NBSL_PP_SRC 0x0
#define K8_NBSL_PP_RES 0x1
#define K8_NBSL_PP_OBS 0x2
#define K8_NBSL_PP_GENERIC 0x3
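Putting the extraction macros together with the message tables declared further down in this header, one plausible classification sketch (the driver's actual decoders live in amd64_edac.c, collapsed above):

/* Illustrative decode of the low 16 bits of an NBSL value. */
extern const char *tt_msgs[4], *ll_msgs[4], *rrrr_msgs[16];
extern const char *pp_msgs[4], *to_msgs[2], *ii_msgs[4];

static void example_decode_error_code(u32 nbsl)
{
	u16 ec = EXTRACT_ERROR_CODE(nbsl);

	if (TEST_TLB_ERROR(ec))
		printk(KERN_DEBUG "TLB error: tt=%s ll=%s\n",
		       tt_msgs[EXTRACT_TT_CODE(ec)],
		       ll_msgs[EXTRACT_LL_CODE(ec)]);
	else if (TEST_MEM_ERROR(ec))
		printk(KERN_DEBUG "mem error: rrrr=%s tt=%s ll=%s\n",
		       rrrr_msgs[EXTRACT_RRRR_CODE(ec)],
		       tt_msgs[EXTRACT_TT_CODE(ec)],
		       ll_msgs[EXTRACT_LL_CODE(ec)]);
	else if (TEST_BUS_ERROR(ec))
		printk(KERN_DEBUG "bus error: pp=%s to=%s ii=%s ll=%s\n",
		       pp_msgs[EXTRACT_PP_CODE(ec)],
		       to_msgs[EXTRACT_TO_CODE(ec)],
		       ii_msgs[EXTRACT_II_CODE(ec)],
		       ll_msgs[EXTRACT_LL_CODE(ec)]);
}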
#define K8_NBSH 0x4C
#define K8_NBSH_VALID_BIT BIT(31)
#define K8_NBSH_OVERFLOW BIT(30)
#define K8_NBSH_UNCORRECTED_ERR BIT(29)
#define K8_NBSH_ERR_ENABLE BIT(28)
#define K8_NBSH_MISC_ERR_VALID BIT(27)
#define K8_NBSH_VALID_ERROR_ADDR BIT(26)
#define K8_NBSH_PCC BIT(25)
#define K8_NBSH_CECC BIT(14)
#define K8_NBSH_UECC BIT(13)
#define K8_NBSH_ERR_SCRUBER BIT(8)
#define K8_NBSH_CORE3 BIT(3)
#define K8_NBSH_CORE2 BIT(2)
#define K8_NBSH_CORE1 BIT(1)
#define K8_NBSH_CORE0 BIT(0)
#define EXTRACT_LDT_LINK(x) (((x) >> 4) & 0x7)
#define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF)
#define EXTRACT_LOW_SYNDROME(x) (((x) >> 15) & 0xff)
#define K8_NBEAL 0x50
#define K8_NBEAH 0x54
#define K8_SCRCTRL 0x58
#define F10_NB_CFG_LOW 0x88
#define F10_NB_CFG_LOW_ENABLE_EXT_CFG BIT(14)
#define F10_NB_CFG_HIGH 0x8C
#define F10_ONLINE_SPARE 0xB0
#define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1))
#define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3))
#define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007)
#define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007)
#define F10_NB_ARRAY_ADDR 0xB8
#define F10_NB_ARRAY_DRAM_ECC 0x80000000
/* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */
#define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1)
#define F10_NB_ARRAY_DATA 0xBC
#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
(BIT(((word) & 0xF) + 20) | \
BIT(17) | \
((bits) & 0xF))
#define SET_NB_DRAM_INJECTION_READ(word, bits) \
(BIT(((word) & 0xF) + 20) | \
BIT(16) | \
((bits) & 0xF))
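For instance, staging word 4 with ECC vector 0x5 for a read works out as:

/*
 * SET_NB_DRAM_INJECTION_READ(4, 0x5)
 *	= BIT(4 + 20) | BIT(16)     | (0x5 & 0xF)
 *	= 0x01000000  | 0x00010000  | 0x00000005
 *	= 0x01010005
 */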
#define K8_NBCAP 0xE8
#define K8_NBCAP_CORES (BIT(12)|BIT(13))
#define K8_NBCAP_CHIPKILL BIT(4)
#define K8_NBCAP_SECDED BIT(3)
#define K8_NBCAP_8_NODE BIT(2)
#define K8_NBCAP_DUAL_NODE BIT(1)
#define K8_NBCAP_DCT_DUAL BIT(0)
/*
* MSR Regs
*/
#define K8_MSR_MCGCTL 0x017b
#define K8_MSR_MCGCTL_NBE BIT(4)
#define K8_MSR_MC4CTL 0x0410
#define K8_MSR_MC4STAT 0x0411
#define K8_MSR_MC4ADDR 0x0412
/* AMD sets the first MC device at device ID 0x18. */
static inline int get_mc_node_id_from_pdev(struct pci_dev *pdev)
{
return PCI_SLOT(pdev->devfn) - 0x18;
}
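/*
 * Worked example: node 1's northbridge functions appear at PCI device
 * 0x19, so PCI_SLOT(pdev->devfn) - 0x18 = 1.
 */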
enum amd64_chipset_families {
K8_CPUS = 0,
F10_CPUS,
F11_CPUS,
};
/*
* Structure to hold:
*
* 1) dynamically read status and error address HW registers
* 2) sysfs entered values
* 3) MCE values
*
* Depends on entry into the modules
*/
struct amd64_error_info_regs {
u32 nbcfg;
u32 nbsh;
u32 nbsl;
u32 nbeah;
u32 nbeal;
};
/* Error injection control structure */
struct error_injection {
u32 section;
u32 word;
u32 bit_map;
};
struct amd64_pvt {
/* pci_device handles which we utilize */
struct pci_dev *addr_f1_ctl;
struct pci_dev *dram_f2_ctl;
struct pci_dev *misc_f3_ctl;
int mc_node_id; /* MC index of this MC node */
int ext_model; /* extended model value of this node */
struct low_ops *ops; /* pointer to per PCI Device ID func table */
int channel_count;
/* Raw registers */
u32 dclr0; /* DRAM Configuration Low DCT0 reg */
u32 dclr1; /* DRAM Configuration Low DCT1 reg */
u32 dchr0; /* DRAM Configuration High DCT0 reg */
u32 dchr1; /* DRAM Configuration High DCT1 reg */
u32 nbcap; /* North Bridge Capabilities */
u32 nbcfg; /* F10 North Bridge Configuration */
u32 ext_nbcfg; /* Extended F10 North Bridge Configuration */
u32 dhar; /* DRAM Hoist reg */
u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
/* DRAM CS Base Address Registers F2x[1,0][5C:40] */
u32 dcsb0[CHIPSELECT_COUNT];
u32 dcsb1[CHIPSELECT_COUNT];
/* DRAM CS Mask Registers F2x[1,0][6C:60] */
u32 dcsm0[CHIPSELECT_COUNT];
u32 dcsm1[CHIPSELECT_COUNT];
/*
* Decoded parts of DRAM BASE and LIMIT Registers
* F1x[78,70,68,60,58,50,48,40]
*/
u64 dram_base[DRAM_REG_COUNT];
u64 dram_limit[DRAM_REG_COUNT];
u8 dram_IntlvSel[DRAM_REG_COUNT];
u8 dram_IntlvEn[DRAM_REG_COUNT];
u8 dram_DstNode[DRAM_REG_COUNT];
u8 dram_rw_en[DRAM_REG_COUNT];
/*
* The following fields are set at (load) run time, after CPU revision
* has been determined, since the dct_base and dct_mask registers vary
* based on revision
*/
u32 dcsb_base; /* DCSB base bits */
u32 dcsm_mask; /* DCSM mask bits */
u32 num_dcsm; /* Number of DCSM registers */
u32 dcs_mask_notused; /* DCSM notused mask bits */
u32 dcs_shift; /* DCSB and DCSM shift value */
u64 top_mem; /* top of memory below 4GB */
u64 top_mem2; /* top of memory above 4GB */
u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */
u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */
u32 online_spare; /* On-Line spare Reg */
/* temp storage for when input is received from sysfs */
struct amd64_error_info_regs ctl_error_info;
/* place to store error injection parameters prior to issue */
struct error_injection injection;
/* Save old hw registers' values before we modified them */
u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */
u32 old_nbctl;
unsigned long old_mcgctl; /* per core on this node */
/* MC Type Index value: socket F vs Family 10h */
u32 mc_type_index;
/* misc settings */
struct flags {
unsigned long cf8_extcfg:1;
} flags;
};
struct scrubrate {
u32 scrubval; /* bit pattern for scrub rate */
u32 bandwidth; /* bandwidth consumed (bytes/sec) */
};
extern struct scrubrate scrubrates[23];
extern u32 revf_quad_ddr2_shift[16];
extern const char *tt_msgs[4];
extern const char *ll_msgs[4];
extern const char *rrrr_msgs[16];
extern const char *to_msgs[2];
extern const char *pp_msgs[4];
extern const char *ii_msgs[4];
extern const char *ext_msgs[32];
extern const char *htlink_msgs[8];
#ifdef CONFIG_EDAC_DEBUG
#define NUM_DBG_ATTRS 9
#else
#define NUM_DBG_ATTRS 0
#endif
#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
#define NUM_INJ_ATTRS 5
#else
#define NUM_INJ_ATTRS 0
#endif
extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
amd64_inj_attrs[NUM_INJ_ATTRS];
/*
* Each of the PCI Device IDs types have their own set of hardware accessor
* functions and per device encoding/decoding logic.
*/
struct low_ops {
int (*probe_valid_hardware)(struct amd64_pvt *pvt);
int (*early_channel_count)(struct amd64_pvt *pvt);
u64 (*get_error_address)(struct mem_ctl_info *mci,
struct amd64_error_info_regs *info);
void (*read_dram_base_limit)(struct amd64_pvt *pvt, int dram);
void (*read_dram_ctl_register)(struct amd64_pvt *pvt);
void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci,
struct amd64_error_info_regs *info,
u64 SystemAddr);
int (*dbam_map_to_pages)(struct amd64_pvt *pvt, int dram_map);
};
struct amd64_family_type {
const char *ctl_name;
u16 addr_f1_ctl;
u16 misc_f3_ctl;
struct low_ops ops;
};
static struct amd64_family_type amd64_family_types[];
static inline const char *get_amd_family_name(int index)
{
return amd64_family_types[index].ctl_name;
}
static inline struct low_ops *family_ops(int index)
{
return &amd64_family_types[index].ops;
}
/*
* For future CPU versions, verify the following as new 'slow' rates appear and
* modify the necessary skip values for the supported CPU.
*/
#define K8_MIN_SCRUB_RATE_BITS 0x0
#define F10_MIN_SCRUB_RATE_BITS 0x5
#define F11_MIN_SCRUB_RATE_BITS 0x6
int amd64_process_error_info(struct mem_ctl_info *mci,
struct amd64_error_info_regs *info,
int handle_errors);
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
u64 *hole_offset, u64 *hole_size);
new file: drivers/edac/amd64_edac_dbg.c (name inferred from the Makefile above)

#include "amd64_edac.h"
/*
* accept a hex value and store it into the virtual error register file fields
* nbeal and nbeah. Assume virtual error values have already been set for: NBSL,
* NBSH and NBCFG. Then proceed to map the error values to a MC, CSROW and
* CHANNEL
*/
static ssize_t amd64_nbea_store(struct mem_ctl_info *mci, const char *data,
size_t count)
{
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long long value;
int ret = 0;
ret = strict_strtoull(data, 16, &value);
if (ret != -EINVAL) {
debugf0("received NBEA= 0x%llx\n", value);
/* place the value into the virtual error packet */
pvt->ctl_error_info.nbeal = (u32) value;
value >>= 32;
pvt->ctl_error_info.nbeah = (u32) value;
/* Process the Mapping request */
/* TODO: Add race prevention */
amd64_process_error_info(mci, &pvt->ctl_error_info, 1);
return count;
}
return ret;
}
/* display the last NBEA (MCA NB Address (MC4_ADDR)) value written */
static ssize_t amd64_nbea_show(struct mem_ctl_info *mci, char *data)
{
struct amd64_pvt *pvt = mci->pvt_info;
u64 value;
value = pvt->ctl_error_info.nbeah;
value <<= 32;
value |= pvt->ctl_error_info.nbeal;
return sprintf(data, "%llx\n", value);
}
/* store the NBSL (MCA NB Status Low (MC4_STATUS)) value user desires */
static ssize_t amd64_nbsl_store(struct mem_ctl_info *mci, const char *data,
size_t count)
{
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
int ret = 0;
ret = strict_strtoul(data, 16, &value);
if (ret != -EINVAL) {
debugf0("received NBSL= 0x%lx\n", value);
pvt->ctl_error_info.nbsl = (u32) value;
return count;
}
return ret;
}
/* display the last NBSL value written */
static ssize_t amd64_nbsl_show(struct mem_ctl_info *mci, char *data)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 value;
value = pvt->ctl_error_info.nbsl;
return sprintf(data, "%x\n", value);
}
/* store the NBSH (MCA NB Status High) value user desires */
static ssize_t amd64_nbsh_store(struct mem_ctl_info *mci, const char *data,
size_t count)
{
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
int ret = 0;
ret = strict_strtoul(data, 16, &value);
if (ret != -EINVAL) {
debugf0("received NBSH= 0x%lx\n", value);
pvt->ctl_error_info.nbsh = (u32) value;
return count;
}
return ret;
}
/* display the last NBSH value written */
static ssize_t amd64_nbsh_show(struct mem_ctl_info *mci, char *data)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 value;
value = pvt->ctl_error_info.nbsh;
return sprintf(data, "%x\n", value);
}
/* accept and store the NBCFG (MCA NB Configuration) value user desires */
static ssize_t amd64_nbcfg_store(struct mem_ctl_info *mci,
const char *data, size_t count)
{
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
int ret = 0;
ret = strict_strtoul(data, 16, &value);
if (ret != -EINVAL) {
debugf0("received NBCFG= 0x%lx\n", value);
pvt->ctl_error_info.nbcfg = (u32) value;
return count;
}
return ret;
}
/* various show routines for the controls of a MCI */
static ssize_t amd64_nbcfg_show(struct mem_ctl_info *mci, char *data)
{
struct amd64_pvt *pvt = mci->pvt_info;
return sprintf(data, "%x\n", pvt->ctl_error_info.nbcfg);
}
static ssize_t amd64_dhar_show(struct mem_ctl_info *mci, char *data)
{
struct amd64_pvt *pvt = mci->pvt_info;
return sprintf(data, "%x\n", pvt->dhar);
}
static ssize_t amd64_dbam_show(struct mem_ctl_info *mci, char *data)
{
struct amd64_pvt *pvt = mci->pvt_info;
return sprintf(data, "%x\n", pvt->dbam0);
}
static ssize_t amd64_topmem_show(struct mem_ctl_info *mci, char *data)
{
struct amd64_pvt *pvt = mci->pvt_info;
return sprintf(data, "%llx\n", pvt->top_mem);
}
static ssize_t amd64_topmem2_show(struct mem_ctl_info *mci, char *data)
{
struct amd64_pvt *pvt = mci->pvt_info;
return sprintf(data, "%llx\n", pvt->top_mem2);
}
static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data)
{
u64 hole_base = 0;
u64 hole_offset = 0;
u64 hole_size = 0;
amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
hole_size);
}
/*
* update NUM_DBG_ATTRS in case you add new members
*/
struct mcidev_sysfs_attribute amd64_dbg_attrs[] = {
{
.attr = {
.name = "nbea_ctl",
.mode = (S_IRUGO | S_IWUSR)
},
.show = amd64_nbea_show,
.store = amd64_nbea_store,
},
{
.attr = {
.name = "nbsl_ctl",
.mode = (S_IRUGO | S_IWUSR)
},
.show = amd64_nbsl_show,
.store = amd64_nbsl_store,
},
{
.attr = {
.name = "nbsh_ctl",
.mode = (S_IRUGO | S_IWUSR)
},
.show = amd64_nbsh_show,
.store = amd64_nbsh_store,
},
{
.attr = {
.name = "nbcfg_ctl",
.mode = (S_IRUGO | S_IWUSR)
},
.show = amd64_nbcfg_show,
.store = amd64_nbcfg_store,
},
{
.attr = {
.name = "dhar",
.mode = (S_IRUGO)
},
.show = amd64_dhar_show,
.store = NULL,
},
{
.attr = {
.name = "dbam",
.mode = (S_IRUGO)
},
.show = amd64_dbam_show,
.store = NULL,
},
{
.attr = {
.name = "topmem",
.mode = (S_IRUGO)
},
.show = amd64_topmem_show,
.store = NULL,
},
{
.attr = {
.name = "topmem2",
.mode = (S_IRUGO)
},
.show = amd64_topmem2_show,
.store = NULL,
},
{
.attr = {
.name = "dram_hole",
.mode = (S_IRUGO)
},
.show = amd64_hole_show,
.store = NULL,
},
};
new file: drivers/edac/amd64_edac_err_types.c (name inferred from the Makefile above)

#include "amd64_edac.h"
/*
* See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
* for DDR2 DRAM mapping.
*/
u32 revf_quad_ddr2_shift[] = {
0, /* 0000b NULL DIMM (128mb) */
28, /* 0001b 256mb */
29, /* 0010b 512mb */
29, /* 0011b 512mb */
29, /* 0100b 512mb */
30, /* 0101b 1gb */
30, /* 0110b 1gb */
31, /* 0111b 2gb */
31, /* 1000b 2gb */
32, /* 1001b 4gb */
32, /* 1010b 4gb */
33, /* 1011b 8gb */
0, /* 1100b future */
0, /* 1101b future */
0, /* 1110b future */
0 /* 1111b future */
};
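Connecting this table with the DBAM_DIMM() macro from amd64_edac.h, an illustrative decode using a fabricated DBAM register value:

/* Illustrative only: decode csrow sizes from DBAM = 0x5731. */
static void example_decode_dbam(void)
{
	u32 dbam = 0x5731;
	int i;

	for (i = 0; i < 4; i++) {
		u32 code = DBAM_DIMM(i, dbam);

		/* e.g. code 1 -> shift 28 -> 256 MB; code 7 -> shift 31 -> 2 GB */
		if (revf_quad_ddr2_shift[code])
			printk(KERN_DEBUG "csrow %d: %llu MB\n", i,
			       (unsigned long long)
			       ((1ULL << revf_quad_ddr2_shift[code]) >> 20));
	}
}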
/*
* Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching or
* higher' value.
*
* FIXME: Produce a better mapping/linearisation.
*/
struct scrubrate scrubrates[] = {
{ 0x01, 1600000000UL},
{ 0x02, 800000000UL},
{ 0x03, 400000000UL},
{ 0x04, 200000000UL},
{ 0x05, 100000000UL},
{ 0x06, 50000000UL},
{ 0x07, 25000000UL},
{ 0x08, 12284069UL},
{ 0x09, 6274509UL},
{ 0x0A, 3121951UL},
{ 0x0B, 1560975UL},
{ 0x0C, 781440UL},
{ 0x0D, 390720UL},
{ 0x0E, 195300UL},
{ 0x0F, 97650UL},
{ 0x10, 48854UL},
{ 0x11, 24427UL},
{ 0x12, 12213UL},
{ 0x13, 6101UL},
{ 0x14, 3051UL},
{ 0x15, 1523UL},
{ 0x16, 761UL},
{ 0x00, 0UL}, /* scrubbing off */
};
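One way the 'matching or higher value' lookup described above might be implemented; a sketch only, since the actual scrub-rate setter lives in amd64_edac.c (collapsed above):

/*
 * Illustrative only: the table runs from fastest to slowest rate, so
 * the first entry whose bandwidth does not exceed the request gives the
 * matching-or-higher scrubval; the terminating { 0x00, 0 } entry means
 * scrubbing off.
 */
static u32 example_pick_scrubval(u32 requested_bw)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++)
		if (scrubrates[i].bandwidth <= requested_bw)
			break;

	return scrubrates[i].scrubval;
}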
/*
* string representation for the different MCA reported error types, see F3x48
* or MSR0000_0411.
*/
const char *tt_msgs[] = { /* transaction type */
"instruction",
"data",
"generic",
"reserved"
};
const char *ll_msgs[] = { /* cache level */
"L0",
"L1",
"L2",
"L3/generic"
};
const char *rrrr_msgs[] = {
"generic",
"generic read",
"generic write",
"data read",
"data write",
"inst fetch",
"prefetch",
"evict",
"snoop",
"reserved RRRR= 9",
"reserved RRRR= 10",
"reserved RRRR= 11",
"reserved RRRR= 12",
"reserved RRRR= 13",
"reserved RRRR= 14",
"reserved RRRR= 15"
};
const char *pp_msgs[] = { /* participating processor */
"local node originated (SRC)",
"local node responded to request (RES)",
"local node observed as 3rd party (OBS)",
"generic"
};
const char *to_msgs[] = {
"no timeout",
"timed out"
};
const char *ii_msgs[] = { /* memory or i/o */
"mem access",
"reserved",
"i/o access",
"generic"
};
/* Map the 5 bits of Extended Error code to the string table. */
const char *ext_msgs[] = { /* extended error */
"K8 ECC error/F10 reserved", /* 0_0000b */
"CRC error", /* 0_0001b */
"sync error", /* 0_0010b */
"mst abort", /* 0_0011b */
"tgt abort", /* 0_0100b */
"GART error", /* 0_0101b */
"RMW error", /* 0_0110b */
"Wdog timer error", /* 0_0111b */
"F10-ECC/K8-Chipkill error", /* 0_1000b */
"DEV Error", /* 0_1001b */
"Link Data error", /* 0_1010b */
"Link or L3 Protocol error", /* 0_1011b */
"NB Array error", /* 0_1100b */
"DRAM Parity error", /* 0_1101b */
"Link Retry/GART Table Walk/DEV Table Walk error", /* 0_1110b */
"Res 0x0ff error", /* 0_1111b */
"Res 0x100 error", /* 1_0000b */
"Res 0x101 error", /* 1_0001b */
"Res 0x102 error", /* 1_0010b */
"Res 0x103 error", /* 1_0011b */
"Res 0x104 error", /* 1_0100b */
"Res 0x105 error", /* 1_0101b */
"Res 0x106 error", /* 1_0110b */
"Res 0x107 error", /* 1_0111b */
"Res 0x108 error", /* 1_1000b */
"Res 0x109 error", /* 1_1001b */
"Res 0x10A error", /* 1_1010b */
"Res 0x10B error", /* 1_1011b */
"L3 Cache Data error", /* 1_1100b */
"L3 CacheTag error", /* 1_1101b */
"L3 Cache LRU error", /* 1_1110b */
"Res 0x1FF error" /* 1_1111b */
};
const char *htlink_msgs[] = {
"none",
"1",
"2",
"1 2",
"3",
"1 3",
"2 3",
"1 2 3"
};
new file: drivers/edac/amd64_edac_inj.c (name inferred from the Makefile above)

#include "amd64_edac.h"
/*
* store error injection section value which refers to one of 4 16-byte sections
* within a 64-byte cacheline
*
* range: 0..3
*/
static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci,
const char *data, size_t count)
{
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
int ret = 0;
ret = strict_strtoul(data, 10, &value);
if (ret != -EINVAL) {
pvt->injection.section = (u32) value;
return count;
}
return ret;
}
/*
* store error injection word value which refers to one of 9 16-bit words of the
* 16-byte (128-bit + ECC bits) section
*
* range: 0..8
*/
static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci,
const char *data, size_t count)
{
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
int ret = 0;
ret = strict_strtoul(data, 10, &value);
if (ret != -EINVAL) {
value = (value <= 8) ? value : 0;
pvt->injection.word = (u32) value;
return count;
}
return ret;
}
/*
* store 16 bit error injection vector which enables injecting errors to the
* corresponding bit within the error injection word above. When used during a
* DRAM ECC read, it holds the contents of the DRAM ECC bits.
*/
static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci,
const char *data, size_t count)
{
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
int ret = 0;
ret = strict_strtoul(data, 16, &value);
if (ret != -EINVAL) {
pvt->injection.bit_map = (u32) value & 0xFFFF;
return count;
}
return ret;
}
/*
* Do a DRAM ECC read. Assemble staged values in the pvt area, format into
* fields needed by the injection registers and read the NB Array Data Port.
*/
static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
const char *data, size_t count)
{
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
u32 section, word_bits;
int ret = 0;
ret = strict_strtoul(data, 10, &value);
if (ret != -EINVAL) {
/* Form value to choose 16-byte section of cacheline */
section = F10_NB_ARRAY_DRAM_ECC |
SET_NB_ARRAY_ADDRESS(pvt->injection.section);
pci_write_config_dword(pvt->misc_f3_ctl,
F10_NB_ARRAY_ADDR, section);
word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
pvt->injection.bit_map);
/* Issue 'word' and 'bit' along with the READ request */
pci_write_config_dword(pvt->misc_f3_ctl,
F10_NB_ARRAY_DATA, word_bits);
debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
return count;
}
return ret;
}
/*
* Do a DRAM ECC write. Assemble staged values in the pvt area and format into
* fields needed by the injection registers.
*/
static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
const char *data, size_t count)
{
struct amd64_pvt *pvt = mci->pvt_info;
unsigned long value;
u32 section, word_bits;
int ret = 0;
ret = strict_strtoul(data, 10, &value);
if (ret != -EINVAL) {
/* Form value to choose 16-byte section of cacheline */
section = F10_NB_ARRAY_DRAM_ECC |
SET_NB_ARRAY_ADDRESS(pvt->injection.section);
pci_write_config_dword(pvt->misc_f3_ctl,
F10_NB_ARRAY_ADDR, section);
word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
pvt->injection.bit_map);
/* Issue 'word' and 'bit' along with the WRITE request */
pci_write_config_dword(pvt->misc_f3_ctl,
F10_NB_ARRAY_DATA, word_bits);
debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
return count;
}
return ret;
}
/*
* update NUM_INJ_ATTRS in case you add new members
*/
struct mcidev_sysfs_attribute amd64_inj_attrs[] = {
{
.attr = {
.name = "inject_section",
.mode = (S_IRUGO | S_IWUSR)
},
.show = NULL,
.store = amd64_inject_section_store,
},
{
.attr = {
.name = "inject_word",
.mode = (S_IRUGO | S_IWUSR)
},
.show = NULL,
.store = amd64_inject_word_store,
},
{
.attr = {
.name = "inject_ecc_vector",
.mode = (S_IRUGO | S_IWUSR)
},
.show = NULL,
.store = amd64_inject_ecc_vector_store,
},
{
.attr = {
.name = "inject_write",
.mode = (S_IRUGO | S_IWUSR)
},
.show = NULL,
.store = amd64_inject_write_store,
},
{
.attr = {
.name = "inject_read",
.mode = (S_IRUGO | S_IWUSR)
},
.show = NULL,
.store = amd64_inject_read_store,
},
};
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
@@ -76,10 +76,11 @@
 extern int edac_debug_level;

 #ifndef CONFIG_EDAC_DEBUG_VERBOSE
-#define edac_debug_printk(level, fmt, arg...)                            \
-	do {                                                             \
-		if (level <= edac_debug_level)                           \
-			edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \
-	} while (0)
+#define edac_debug_printk(level, fmt, arg...)                           \
+	do {                                                            \
+		if (level <= edac_debug_level)                          \
+			edac_printk(KERN_DEBUG, EDAC_DEBUG,             \
+				    "%s: " fmt, __func__, ##arg);       \
+	} while (0)
 #else  /* CONFIG_EDAC_DEBUG_VERBOSE */
 #define edac_debug_printk(level, fmt, arg...)                           \