提交 53756d37 编写于 作者: J Jeremy Fitzhardinge 提交者: Ingo Molnar

x86: add set/clear_cpu_cap operations

The patch to suppress bitops-related warnings added a pile of ugly
casts.  Many of these were related to the management of x86 CPU
capabilities.  Clean these up by adding specific set/clear_cpu_cap
macros, and use them consistently.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
上级 5548fecd
...@@ -356,15 +356,15 @@ void alternatives_smp_switch(int smp) ...@@ -356,15 +356,15 @@ void alternatives_smp_switch(int smp)
spin_lock_irqsave(&smp_alt, flags); spin_lock_irqsave(&smp_alt, flags);
if (smp) { if (smp) {
printk(KERN_INFO "SMP alternatives: switching to SMP code\n"); printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
clear_bit(X86_FEATURE_UP, cpu_data(0).x86_capability); clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
list_for_each_entry(mod, &smp_alt_modules, next) list_for_each_entry(mod, &smp_alt_modules, next)
alternatives_smp_lock(mod->locks, mod->locks_end, alternatives_smp_lock(mod->locks, mod->locks_end,
mod->text, mod->text_end); mod->text, mod->text_end);
} else { } else {
printk(KERN_INFO "SMP alternatives: switching to UP code\n"); printk(KERN_INFO "SMP alternatives: switching to UP code\n");
set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability); set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
list_for_each_entry(mod, &smp_alt_modules, next) list_for_each_entry(mod, &smp_alt_modules, next)
alternatives_smp_unlock(mod->locks, mod->locks_end, alternatives_smp_unlock(mod->locks, mod->locks_end,
mod->text, mod->text_end); mod->text, mod->text_end);
...@@ -431,8 +431,9 @@ void __init alternative_instructions(void) ...@@ -431,8 +431,9 @@ void __init alternative_instructions(void)
if (smp_alt_once) { if (smp_alt_once) {
if (1 == num_possible_cpus()) { if (1 == num_possible_cpus()) {
printk(KERN_INFO "SMP alternatives: switching to UP code\n"); printk(KERN_INFO "SMP alternatives: switching to UP code\n");
set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability); set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
alternatives_smp_unlock(__smp_locks, __smp_locks_end, alternatives_smp_unlock(__smp_locks, __smp_locks_end,
_text, _etext); _text, _etext);
} }
......
...@@ -1078,7 +1078,7 @@ static int __init detect_init_APIC (void) ...@@ -1078,7 +1078,7 @@ static int __init detect_init_APIC (void)
printk(KERN_WARNING "Could not enable APIC!\n"); printk(KERN_WARNING "Could not enable APIC!\n");
return -1; return -1;
} }
set_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
/* The BIOS may have set up the APIC at some other address */ /* The BIOS may have set up the APIC at some other address */
...@@ -1168,7 +1168,7 @@ void __init init_apic_mappings(void) ...@@ -1168,7 +1168,7 @@ void __init init_apic_mappings(void)
int __init APIC_init_uniprocessor (void) int __init APIC_init_uniprocessor (void)
{ {
if (enable_local_apic < 0) if (enable_local_apic < 0)
clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
if (!smp_found_config && !cpu_has_apic) if (!smp_found_config && !cpu_has_apic)
return -1; return -1;
...@@ -1180,7 +1180,7 @@ int __init APIC_init_uniprocessor (void) ...@@ -1180,7 +1180,7 @@ int __init APIC_init_uniprocessor (void)
APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n", printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
boot_cpu_physical_apicid); boot_cpu_physical_apicid);
clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
return -1; return -1;
} }
...@@ -1536,7 +1536,7 @@ early_param("lapic", parse_lapic); ...@@ -1536,7 +1536,7 @@ early_param("lapic", parse_lapic);
static int __init parse_nolapic(char *arg) static int __init parse_nolapic(char *arg)
{ {
enable_local_apic = -1; enable_local_apic = -1;
clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
return 0; return 0;
} }
early_param("nolapic", parse_nolapic); early_param("nolapic", parse_nolapic);
......
...@@ -1211,7 +1211,7 @@ early_param("apic", apic_set_verbosity); ...@@ -1211,7 +1211,7 @@ early_param("apic", apic_set_verbosity);
static __init int setup_disableapic(char *str) static __init int setup_disableapic(char *str)
{ {
disable_apic = 1; disable_apic = 1;
clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
return 0; return 0;
} }
early_param("disableapic", setup_disableapic); early_param("disableapic", setup_disableapic);
......
...@@ -45,6 +45,6 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) ...@@ -45,6 +45,6 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
&regs[CR_ECX], &regs[CR_EDX]); &regs[CR_ECX], &regs[CR_EDX]);
if (regs[cb->reg] & (1 << cb->bit)) if (regs[cb->reg] & (1 << cb->bit))
set_bit(cb->feature, c->x86_capability); set_cpu_cap(c, cb->feature);
} }
} }
...@@ -80,7 +80,7 @@ void mce_log(struct mce *mce) ...@@ -80,7 +80,7 @@ void mce_log(struct mce *mce)
/* When the buffer fills up discard new entries. Assume /* When the buffer fills up discard new entries. Assume
that the earlier errors are the more interesting. */ that the earlier errors are the more interesting. */
if (entry >= MCE_LOG_LEN) { if (entry >= MCE_LOG_LEN) {
set_bit(MCE_OVERFLOW, &mcelog.flags); set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
return; return;
} }
/* Old left over entry. Skip. */ /* Old left over entry. Skip. */
......
...@@ -238,7 +238,7 @@ static int __init parse_mem(char *arg) ...@@ -238,7 +238,7 @@ static int __init parse_mem(char *arg)
return -EINVAL; return -EINVAL;
if (strcmp(arg, "nopentium") == 0) { if (strcmp(arg, "nopentium") == 0) {
clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
disable_pse = 1; disable_pse = 1;
} else { } else {
/* If the user specifies memory size, we /* If the user specifies memory size, we
......
...@@ -667,13 +667,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) ...@@ -667,13 +667,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
level = cpuid_eax(1); level = cpuid_eax(1);
if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
level >= 0x0f58)) level >= 0x0f58))
set_bit(X86_FEATURE_REP_GOOD, (unsigned long *)&c->x86_capability); set_cpu_cap(c, X86_FEATURE_REP_GOOD);
if (c->x86 == 0x10 || c->x86 == 0x11) if (c->x86 == 0x10 || c->x86 == 0x11)
set_bit(X86_FEATURE_REP_GOOD, (unsigned long *)&c->x86_capability); set_cpu_cap(c, X86_FEATURE_REP_GOOD);
/* Enable workaround for FXSAVE leak */ /* Enable workaround for FXSAVE leak */
if (c->x86 >= 6) if (c->x86 >= 6)
set_bit(X86_FEATURE_FXSAVE_LEAK, (unsigned long *)&c->x86_capability); set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
level = get_model_name(c); level = get_model_name(c);
if (!level) { if (!level) {
...@@ -689,7 +689,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) ...@@ -689,7 +689,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */ /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
if (c->x86_power & (1<<8)) if (c->x86_power & (1<<8))
set_bit(X86_FEATURE_CONSTANT_TSC, (unsigned long *)&c->x86_capability); set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
/* Multi core CPU? */ /* Multi core CPU? */
if (c->extended_cpuid_level >= 0x80000008) if (c->extended_cpuid_level >= 0x80000008)
...@@ -702,14 +702,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) ...@@ -702,14 +702,14 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
num_cache_leaves = 3; num_cache_leaves = 3;
if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11) if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
set_bit(X86_FEATURE_K8, (unsigned long *)&c->x86_capability); set_cpu_cap(c, X86_FEATURE_K8);
/* RDTSC can be speculated around */ /* RDTSC can be speculated around */
clear_bit(X86_FEATURE_SYNC_RDTSC, (unsigned long *)&c->x86_capability); clear_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
/* Family 10 doesn't support C states in MWAIT so don't use it */ /* Family 10 doesn't support C states in MWAIT so don't use it */
if (c->x86 == 0x10 && !force_mwait) if (c->x86 == 0x10 && !force_mwait)
clear_bit(X86_FEATURE_MWAIT, (unsigned long *)&c->x86_capability); clear_cpu_cap(c, X86_FEATURE_MWAIT);
if (amd_apic_timer_broken()) if (amd_apic_timer_broken())
disable_apic_timer = 1; disable_apic_timer = 1;
...@@ -811,17 +811,16 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) ...@@ -811,17 +811,16 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
unsigned eax = cpuid_eax(10); unsigned eax = cpuid_eax(10);
/* Check for version and the number of counters */ /* Check for version and the number of counters */
if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
set_bit(X86_FEATURE_ARCH_PERFMON, set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
(unsigned long *)&c->x86_capability);
} }
if (cpu_has_ds) { if (cpu_has_ds) {
unsigned int l1, l2; unsigned int l1, l2;
rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
if (!(l1 & (1<<11))) if (!(l1 & (1<<11)))
set_bit(X86_FEATURE_BTS, (unsigned long *)c->x86_capability); set_cpu_cap(c, X86_FEATURE_BTS);
if (!(l1 & (1<<12))) if (!(l1 & (1<<12)))
set_bit(X86_FEATURE_PEBS, (unsigned long *)c->x86_capability); set_cpu_cap(c, X86_FEATURE_PEBS);
} }
n = c->extended_cpuid_level; n = c->extended_cpuid_level;
...@@ -840,13 +839,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) ...@@ -840,13 +839,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
c->x86_cache_alignment = c->x86_clflush_size * 2; c->x86_cache_alignment = c->x86_clflush_size * 2;
if ((c->x86 == 0xf && c->x86_model >= 0x03) || if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
(c->x86 == 0x6 && c->x86_model >= 0x0e)) (c->x86 == 0x6 && c->x86_model >= 0x0e))
set_bit(X86_FEATURE_CONSTANT_TSC, (unsigned long *)&c->x86_capability); set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
if (c->x86 == 6) if (c->x86 == 6)
set_bit(X86_FEATURE_REP_GOOD, (unsigned long *)&c->x86_capability); set_cpu_cap(c, X86_FEATURE_REP_GOOD);
if (c->x86 == 15) if (c->x86 == 15)
set_bit(X86_FEATURE_SYNC_RDTSC, (unsigned long *)&c->x86_capability); set_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
else else
clear_bit(X86_FEATURE_SYNC_RDTSC, (unsigned long *)&c->x86_capability); clear_cpu_cap(c, X86_FEATURE_SYNC_RDTSC);
c->x86_max_cores = intel_num_cpu_cores(c); c->x86_max_cores = intel_num_cpu_cores(c);
srat_detect_node(); srat_detect_node();
......
...@@ -963,19 +963,19 @@ static int __init parse_vmi(char *arg) ...@@ -963,19 +963,19 @@ static int __init parse_vmi(char *arg)
return -EINVAL; return -EINVAL;
if (!strcmp(arg, "disable_pge")) { if (!strcmp(arg, "disable_pge")) {
clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
disable_pge = 1; disable_pge = 1;
} else if (!strcmp(arg, "disable_pse")) { } else if (!strcmp(arg, "disable_pse")) {
clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PSE);
disable_pse = 1; disable_pse = 1;
} else if (!strcmp(arg, "disable_sep")) { } else if (!strcmp(arg, "disable_sep")) {
clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_SEP);
disable_sep = 1; disable_sep = 1;
} else if (!strcmp(arg, "disable_tsc")) { } else if (!strcmp(arg, "disable_tsc")) {
clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC);
disable_tsc = 1; disable_tsc = 1;
} else if (!strcmp(arg, "disable_mtrr")) { } else if (!strcmp(arg, "disable_mtrr")) {
clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_MTRR);
disable_mtrr = 1; disable_mtrr = 1;
} else if (!strcmp(arg, "disable_timer")) { } else if (!strcmp(arg, "disable_timer")) {
disable_vmi_timer = 1; disable_vmi_timer = 1;
......
...@@ -124,9 +124,12 @@ ...@@ -124,9 +124,12 @@
(((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \ (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \ (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \
? 1 : \ ? 1 : \
test_bit(bit, (unsigned long *)(c)->x86_capability)) test_bit(bit, (unsigned long *)((c)->x86_capability)))
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit) #define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
#define cpu_has_vme boot_cpu_has(X86_FEATURE_VME) #define cpu_has_vme boot_cpu_has(X86_FEATURE_VME)
#define cpu_has_de boot_cpu_has(X86_FEATURE_DE) #define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册