Commit 50542251 authored by Borislav Petkov, committed by H. Peter Anvin

x86, msr: Add support for non-contiguous cpumasks

The current rd/wrmsr_on_cpus helpers assume that the supplied
cpumasks are contiguous. However, there are machines out there
like some K8 multinode Opterons which have a non-contiguous core
enumeration on each node (e.g. cores 0,2 on node 0 instead of 0,1), see
http://www.gossamer-threads.com/lists/linux/kernel/1160268.
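
To make the failure mode concrete: the old scheme allocates cpumask_weight(mask)
msr structs per node and indexes them with this_cpu - cpumask_first(mask). The
standalone, user-space sketch below (illustrative values only, not kernel code)
shows what that index does for a node whose online cores are 0 and 2:

#include <stdio.h>

/* Illustration of the old per-node indexing with made-up values; the real
 * code derives 'weight' from cpumask_weight() and 'off' from cpumask_first(). */
int main(void)
{
        int node_cpus[] = { 0, 2 };     /* non-contiguous cores on one K8 node */
        int weight = 2;                 /* cpumask_weight(mask): 2 msr slots   */
        int off = node_cpus[0];         /* rv.off = cpumask_first(mask) = 0    */
        int i;

        for (i = 0; i < weight; i++) {
                int idx = node_cpus[i] - off;   /* old: &msrs[this_cpu - rv->off] */

                printf("cpu %d -> msrs[%d]%s\n", node_cpus[i], idx,
                       idx >= weight ? "  <-- out of bounds" : "");
        }
        return 0;
}

Core 2 lands one slot past the two-element array.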

This patch fixes out-of-bounds writes (see URL above) by adding per-CPU
msr structs which are used on the respective cores.

Additionally, two helpers, msrs_{alloc,free}, are provided for use by
the callers of the MSR accessors.
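
As a rough sketch of the resulting calling convention (the example_* names are
hypothetical; the calls mirror the amd64_edac changes below):

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <asm/msr.h>

/* Hypothetical driver skeleton: allocate per-CPU msr space once, read a
 * whole cpumask, then address each CPU's slot via per_cpu_ptr() -- holes
 * in the mask no longer matter because there is no dense index. */
static struct msr *msrs;

static int example_init(void)
{
        msrs = msrs_alloc();            /* alloc_percpu() under the hood */
        if (!msrs)
                return -ENOMEM;
        return 0;
}

static void example_check(const struct cpumask *mask)
{
        int cpu;

        rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

        for_each_cpu(cpu, mask) {
                struct msr *reg = per_cpu_ptr(msrs, cpu);

                pr_info("cpu %d: MCG_CTL = 0x%llx\n", cpu, reg->q);
        }
}

static void example_exit(void)
{
        msrs_free(msrs);
        msrs = NULL;
}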

Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Mauro Carvalho Chehab <mchehab@redhat.com>
Cc: Aristeu Rozanski <aris@redhat.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Doug Thompson <dougthompson@xmission.com>
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
LKML-Reference: <20091211171440.GD31998@aftab>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Parent 5c6baba8
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -244,6 +244,9 @@ do { \
 
 #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
 
+struct msr *msrs_alloc(void);
+void msrs_free(struct msr *msrs);
+
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -7,7 +7,6 @@ struct msr_info {
         u32 msr_no;
         struct msr reg;
         struct msr *msrs;
-        int off;
         int err;
 };
 
@@ -18,7 +17,7 @@ static void __rdmsr_on_cpu(void *info)
         int this_cpu = raw_smp_processor_id();
 
         if (rv->msrs)
-                reg = &rv->msrs[this_cpu - rv->off];
+                reg = per_cpu_ptr(rv->msrs, this_cpu);
         else
                 reg = &rv->reg;
 
@@ -32,7 +31,7 @@ static void __wrmsr_on_cpu(void *info)
         int this_cpu = raw_smp_processor_id();
 
         if (rv->msrs)
-                reg = &rv->msrs[this_cpu - rv->off];
+                reg = per_cpu_ptr(rv->msrs, this_cpu);
         else
                 reg = &rv->reg;
 
@@ -80,7 +79,6 @@ static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
 
         memset(&rv, 0, sizeof(rv));
 
-        rv.off    = cpumask_first(mask);
         rv.msrs   = msrs;
         rv.msr_no = msr_no;
 
@@ -120,6 +118,26 @@ void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
 }
 EXPORT_SYMBOL(wrmsr_on_cpus);
 
+struct msr *msrs_alloc(void)
+{
+        struct msr *msrs = NULL;
+
+        msrs = alloc_percpu(struct msr);
+        if (!msrs) {
+                pr_warning("%s: error allocating msrs\n", __func__);
+                return NULL;
+        }
+
+        return msrs;
+}
+EXPORT_SYMBOL(msrs_alloc);
+
+void msrs_free(struct msr *msrs)
+{
+        free_percpu(msrs);
+}
+EXPORT_SYMBOL(msrs_free);
+
 /* These "safe" variants are slower and should be used when the target MSR
    may not actually exist. */
 static void __rdmsr_safe_on_cpu(void *info)
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -13,6 +13,8 @@ module_param(report_gart_errors, int, 0644);
 static int ecc_enable_override;
 module_param(ecc_enable_override, int, 0644);
 
+static struct msr *msrs;
+
 /* Lookup table for all possible MC control instances */
 struct amd64_pvt;
 static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
@@ -2495,8 +2497,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
 static bool amd64_nb_mce_bank_enabled_on_node(int nid)
 {
         cpumask_var_t mask;
-        struct msr *msrs;
-        int cpu, nbe, idx = 0;
+        int cpu, nbe;
         bool ret = false;
 
         if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
@@ -2507,32 +2508,22 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
 
         get_cpus_on_this_dct_cpumask(mask, nid);
 
-        msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
-        if (!msrs) {
-                amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
-                             __func__);
-                free_cpumask_var(mask);
-                return false;
-        }
-
         rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
 
         for_each_cpu(cpu, mask) {
-                nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
+                struct msr *reg = per_cpu_ptr(msrs, cpu);
+                nbe = reg->l & K8_MSR_MCGCTL_NBE;
 
                 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
-                        cpu, msrs[idx].q,
+                        cpu, reg->q,
                         (nbe ? "enabled" : "disabled"));
 
                 if (!nbe)
                         goto out;
-
-                idx++;
         }
         ret = true;
 
 out:
-        kfree(msrs);
         free_cpumask_var(mask);
         return ret;
 }
@@ -2540,8 +2531,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
 static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
 {
         cpumask_var_t cmask;
-        struct msr *msrs = NULL;
-        int cpu, idx = 0;
+        int cpu;
 
         if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
                 amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
@@ -2551,34 +2541,27 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
 
         get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
 
-        msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
-        if (!msrs) {
-                amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
-                             __func__);
-                return -ENOMEM;
-        }
-
         rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
 
         for_each_cpu(cpu, cmask) {
+                struct msr *reg = per_cpu_ptr(msrs, cpu);
+
                 if (on) {
-                        if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
+                        if (reg->l & K8_MSR_MCGCTL_NBE)
                                 pvt->flags.ecc_report = 1;
 
-                        msrs[idx].l |= K8_MSR_MCGCTL_NBE;
+                        reg->l |= K8_MSR_MCGCTL_NBE;
                 } else {
                         /*
                          * Turn off ECC reporting only when it was off before
                          */
                         if (!pvt->flags.ecc_report)
-                                msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
+                                reg->l &= ~K8_MSR_MCGCTL_NBE;
                 }
-                idx++;
         }
         wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
 
-        kfree(msrs);
         free_cpumask_var(cmask);
 
         return 0;
@@ -3036,6 +3019,8 @@ static int __init amd64_edac_init(void)
         if (cache_k8_northbridges() < 0)
                 return err;
 
+        msrs = msrs_alloc();
+
         err = pci_register_driver(&amd64_pci_driver);
         if (err)
                 return err;
@@ -3071,6 +3056,9 @@ static void __exit amd64_edac_exit(void)
                 edac_pci_release_generic_ctl(amd64_ctl_pci);
 
         pci_unregister_driver(&amd64_pci_driver);
+
+        msrs_free(msrs);
+        msrs = NULL;
 }
 
 module_init(amd64_edac_init);