Commit 41d8bfab authored by Borislav Petkov

amd64_edac: Improve DRAM address mapping

Drop static tables which map the bits in F2x80 to a chip select size in
favor of functions doing the mapping with some bit fiddling. Also, add
F15 support.
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
Parent 5a5d2371
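The claim that the bit fiddling is equivalent to the dropped tables can be checked outside the kernel. Below is a minimal userspace sketch (not part of the patch): it copies ddr2_cs_size(), ddr3_cs_size() and the old ddr2_dbam/ddr3_dbam tables verbatim and asserts that, for a 64bit DCT (dct_width == false), the helpers reproduce every table entry:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* the lookup tables this patch removes */
	static int ddr2_dbam[] = { [0] = 128, [1] = 256, [2 ... 4] = 512,
				   [5 ... 6] = 1024, [7 ... 8] = 2048,
				   [9 ... 10] = 4096, [11] = 8192 };

	static int ddr3_dbam[] = { [0] = -1, [1] = 256, [2] = 512,
				   [3 ... 4] = -1, [5 ... 6] = 1024,
				   [7 ... 8] = 2048, [9 ... 10] = 4096,
				   [11] = 8192 };

	/* the replacement helpers, copied from the patch */
	static int ddr2_cs_size(unsigned i, bool dct_width)
	{
		unsigned shift = 0;

		if (i <= 2)
			shift = i;
		else if (!(i & 0x1))
			shift = i >> 1;
		else
			shift = (i + 1) >> 1;

		return 128 << (shift + !!dct_width);
	}

	static int ddr3_cs_size(unsigned i, bool dct_width)
	{
		unsigned shift = 0;
		int cs_size = 0;

		if (i == 0 || i == 3 || i == 4)
			cs_size = -1;
		else if (i <= 2)
			shift = i;
		else if (i == 12)
			shift = 7;
		else if (!(i & 0x1))
			shift = i >> 1;
		else
			shift = (i + 1) >> 1;

		if (cs_size != -1)
			cs_size = (128 * (1 << !!dct_width)) << shift;

		return cs_size;
	}

	int main(void)
	{
		unsigned i;

		/* 64bit DCT: the helpers must reproduce the tables */
		for (i = 0; i <= 11; i++) {
			assert(ddr2_cs_size(i, false) == ddr2_dbam[i]);
			assert(ddr3_cs_size(i, false) == ddr3_dbam[i]);
		}
		puts("bit fiddling matches the old tables");
		return 0;
	}

Passing true for dct_width doubles the result once more, folding the 128bit-DCT adjustment into the helper instead of leaving it to the call sites.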
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -24,51 +24,6 @@ static atomic_t drv_instances = ATOMIC_INIT(0);
 static struct mem_ctl_info **mcis;
 static struct ecc_settings **ecc_stngs;
 
-/*
- * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
- * later.
- */
-static int ddr2_dbam_revCG[] = {
-			   [0]		= 32,
-			   [1]		= 64,
-			   [2]		= 128,
-			   [3]		= 256,
-			   [4]		= 512,
-			   [5]		= 1024,
-			   [6]		= 2048,
-};
-
-static int ddr2_dbam_revD[] = {
-			   [0]		= 32,
-			   [1]		= 64,
-			   [2 ... 3]	= 128,
-			   [4]		= 256,
-			   [5]		= 512,
-			   [6]		= 256,
-			   [7]		= 512,
-			   [8 ... 9]	= 1024,
-			   [10]		= 2048,
-};
-
-static int ddr2_dbam[] = { [0]		= 128,
-			   [1]		= 256,
-			   [2 ... 4]	= 512,
-			   [5 ... 6]	= 1024,
-			   [7 ... 8]	= 2048,
-			   [9 ... 10]	= 4096,
-			   [11]		= 8192,
-};
-
-static int ddr3_dbam[] = { [0]		= -1,
-			   [1]		= 256,
-			   [2]		= 512,
-			   [3 ... 4]	= -1,
-			   [5 ... 6]	= 1024,
-			   [7 ... 8]	= 2048,
-			   [9 ... 10]	= 4096,
-			   [11]		= 8192,
-};
-
 /*
  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
  * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
@@ -76,8 +31,6 @@ static int ddr3_dbam[] = { [0] = -1,
  *
  *FIXME: Produce a better mapping/linearisation.
  */
-
-
 struct scrubrate {
 	u32 scrubval;		/* bit pattern for scrub rate */
 	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
@@ -962,7 +915,7 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
 
 	if (pvt->ext_model >= K8_REV_F)
 		/* RevF (NPT) and later */
-		flag = pvt->dclr0 & F10_WIDTH_128;
+		flag = pvt->dclr0 & WIDTH_128;
 	else
 		/* RevE and earlier */
 		flag = pvt->dclr0 & REVE_WIDTH_128;
@@ -1062,18 +1015,41 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
 	}
 }
 
-static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr2_cs_size(unsigned i, bool dct_width)
 {
-	int *dbam_map;
+	unsigned shift = 0;
 
-	if (pvt->ext_model >= K8_REV_F)
-		dbam_map = ddr2_dbam;
-	else if (pvt->ext_model >= K8_REV_D)
-		dbam_map = ddr2_dbam_revD;
+	if (i <= 2)
+		shift = i;
+	else if (!(i & 0x1))
+		shift = i >> 1;
 	else
-		dbam_map = ddr2_dbam_revCG;
+		shift = (i + 1) >> 1;
+
+	return 128 << (shift + !!dct_width);
+}
+
+static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+				  unsigned cs_mode)
+{
+	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+	if (pvt->ext_model >= K8_REV_F) {
+		WARN_ON(cs_mode > 11);
+		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
+	}
+	else if (pvt->ext_model >= K8_REV_D) {
+		WARN_ON(cs_mode > 10);
 
-	return dbam_map[cs_mode];
+		if (cs_mode == 3 || cs_mode == 8)
+			return 32 << (cs_mode - 1);
+		else
+			return 32 << cs_mode;
+	}
+	else {
+		WARN_ON(cs_mode > 6);
+		return 32 << cs_mode;
+	}
 }
 
 /*
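The shift arithmetic in ddr2_cs_size() is just the old ddr2_dbam table in disguise: the chip select size doubles on each of the first three cs_mode steps, then once per pair of values, an odd cs_mode sharing the size of the even one that follows it. As an annotation (mine, not the patch's):

	/*
	 * cs_mode:  0    1    2    3    4    5     6     7     8     9     10    11
	 * shift:    0    1    2    2    2    3     3     4     4     5     5     6
	 * size:     128  256  512  512  512  1024  1024  2048  2048  4096  4096  8192  MB
	 *
	 * i.e. size = 128 << shift on a 64bit DCT, doubled once more
	 * (the "+ !!dct_width" term) when the DCT runs in 128bit mode.
	 */

For pre-RevF parts the rewritten k8_dbam_to_chip_select() likewise avoids tables, shifting the minimum 32MB chip select size by cs_mode directly.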
@@ -1089,7 +1065,7 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
 	int i, j, channels = 0;
 
 	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
-	if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & F10_WIDTH_128))
+	if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
 		return 2;
 
 	/*
@@ -1126,16 +1102,50 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
 	return channels;
 }
 
-static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr3_cs_size(unsigned i, bool dct_width)
+{
+	unsigned shift = 0;
+	int cs_size = 0;
+
+	if (i == 0 || i == 3 || i == 4)
+		cs_size = -1;
+	else if (i <= 2)
+		shift = i;
+	else if (i == 12)
+		shift = 7;
+	else if (!(i & 0x1))
+		shift = i >> 1;
+	else
+		shift = (i + 1) >> 1;
+
+	if (cs_size != -1)
+		cs_size = (128 * (1 << !!dct_width)) << shift;
+
+	return cs_size;
+}
+
+static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+				   unsigned cs_mode)
 {
-	int *dbam_map;
+	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+	WARN_ON(cs_mode > 11);
 
 	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
-		dbam_map = ddr3_dbam;
+		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
 	else
-		dbam_map = ddr2_dbam;
+		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
+}
+
+/*
+ * F15h supports only 64bit DCT interfaces
+ */
+static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+				   unsigned cs_mode)
+{
+	WARN_ON(cs_mode > 12);
 
-	return dbam_map[cs_mode];
+	return ddr3_cs_size(cs_mode, false);
 }
 
 static void read_dram_ctl_register(struct amd64_pvt *pvt)
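Note that ddr3_cs_size() also understands cs_mode == 12 (shift 7), an encoding only reachable through f15_dbam_to_chip_select() with its WARN_ON(cs_mode > 12); the dropped 12-entry ddr3_dbam table had no slot for it. In the harness above, a hypothetical extra check would be:

	/* F15h-only encoding; the old ddr3_dbam table stopped at cs_mode 11 */
	assert(ddr3_cs_size(12, false) == (128 << 7));	/* 16384 MB */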
@@ -1528,7 +1538,7 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
 	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
 
 	if (boot_cpu_data.x86 == 0xf) {
-		if (pvt->dclr0 & F10_WIDTH_128)
+		if (pvt->dclr0 & WIDTH_128)
 			factor = 1;
 
 		/* K8 families < revF not supported yet */
@@ -1551,11 +1561,13 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
 		size0 = 0;
 		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
-			size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
+						     DBAM_DIMM(dimm, dbam));
 
 		size1 = 0;
 		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
-			size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
+						     DBAM_DIMM(dimm, dbam));
 
 		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
 			   dimm * 2, size0 << factor,
 			   dimm * 2 + 1, size1 << factor);
@@ -1591,6 +1603,7 @@ static struct amd64_family_type amd64_family_types[] = {
 		.ops = {
 			.early_channel_count	= f1x_early_channel_count,
 			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
+			.dbam_to_cs		= f15_dbam_to_chip_select,
 			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
 		}
 	},
@@ -2030,7 +2043,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
  * encompasses
  *
  */
-static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
+static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
 {
 	u32 cs_mode, nr_pages;
 
@@ -2043,7 +2056,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
 	 */
 	cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
 
-	nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
+	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
 
 	/*
 	 * If dual channel then double the memory size of single channel.
@@ -2091,7 +2104,7 @@ static int init_csrows(struct mem_ctl_info *mci)
 			i, pvt->mc_node_id);
 
 		empty = 0;
-		csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
+		csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
 		find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
 		sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
 		csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -221,7 +221,7 @@
 #define DCLR0				0x90
 #define DCLR1				0x190
 #define REVE_WIDTH_128			BIT(16)
-#define F10_WIDTH_128			BIT(11)
+#define WIDTH_128			BIT(11)
 
 #define DCHR0				0x94
 #define DCHR1				0x194
@@ -445,7 +445,7 @@ struct low_ops {
 	int (*early_channel_count)	(struct amd64_pvt *pvt);
 	void (*map_sysaddr_to_csrow)	(struct mem_ctl_info *mci, u64 sys_addr,
 					 u16 syndrome);
-	int (*dbam_to_cs)		(struct amd64_pvt *pvt, int cs_mode);
+	int (*dbam_to_cs)		(struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
 	int (*read_dct_pci_cfg)		(struct amd64_pvt *pvt, int offset,
 					 u32 *val, const char *func);
 };