提交 7b543a53 编写于 作者: T Tejun Heo

x86: Replace uses of current_cpu_data with this_cpu ops

Replace all uses of current_cpu_data with this_cpu operations on the
per cpu structure cpu_info.  The scalar accesses are replaced with the
matching this_cpu ops which results in smaller and more efficient
code.

In the long run, it might be a good idea to remove cpu_data() macro
too and use per_cpu macro directly.

tj: updated description

Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: H. Peter Anvin <hpa@zytor.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
上级 0a3aee0d
...@@ -141,10 +141,9 @@ extern __u32 cpu_caps_set[NCAPINTS]; ...@@ -141,10 +141,9 @@ extern __u32 cpu_caps_set[NCAPINTS];
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu) per_cpu(cpu_info, cpu) #define cpu_data(cpu) per_cpu(cpu_info, cpu)
#define current_cpu_data __get_cpu_var(cpu_info)
#else #else
#define cpu_info boot_cpu_data
#define cpu_data(cpu) boot_cpu_data #define cpu_data(cpu) boot_cpu_data
#define current_cpu_data boot_cpu_data
#endif #endif
extern const struct seq_operations cpuinfo_op; extern const struct seq_operations cpuinfo_op;
......
...@@ -516,7 +516,7 @@ static void __cpuinit setup_APIC_timer(void) ...@@ -516,7 +516,7 @@ static void __cpuinit setup_APIC_timer(void)
{ {
struct clock_event_device *levt = &__get_cpu_var(lapic_events); struct clock_event_device *levt = &__get_cpu_var(lapic_events);
if (cpu_has(&current_cpu_data, X86_FEATURE_ARAT)) { if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_ARAT)) {
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP; lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
/* Make LAPIC timer preferrable over percpu HPET */ /* Make LAPIC timer preferrable over percpu HPET */
lapic_clockevent.rating = 150; lapic_clockevent.rating = 150;
......
...@@ -668,7 +668,7 @@ EXPORT_SYMBOL_GPL(amd_erratum_383); ...@@ -668,7 +668,7 @@ EXPORT_SYMBOL_GPL(amd_erratum_383);
bool cpu_has_amd_erratum(const int *erratum) bool cpu_has_amd_erratum(const int *erratum)
{ {
struct cpuinfo_x86 *cpu = &current_cpu_data; struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
int osvw_id = *erratum++; int osvw_id = *erratum++;
u32 range; u32 range;
u32 ms; u32 ms;
......
...@@ -521,7 +521,7 @@ static void check_supported_cpu(void *_rc) ...@@ -521,7 +521,7 @@ static void check_supported_cpu(void *_rc)
*rc = -ENODEV; *rc = -ENODEV;
if (current_cpu_data.x86_vendor != X86_VENDOR_AMD) if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD)
return; return;
eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
......
...@@ -266,7 +266,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, ...@@ -266,7 +266,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
line_size = l2.line_size; line_size = l2.line_size;
lines_per_tag = l2.lines_per_tag; lines_per_tag = l2.lines_per_tag;
/* cpu_data has errata corrections for K7 applied */ /* cpu_data has errata corrections for K7 applied */
size_in_kb = current_cpu_data.x86_cache_size; size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
break; break;
case 3: case 3:
if (!l3.val) if (!l3.val)
...@@ -288,7 +288,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, ...@@ -288,7 +288,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
eax->split.type = types[leaf]; eax->split.type = types[leaf];
eax->split.level = levels[leaf]; eax->split.level = levels[leaf];
eax->split.num_threads_sharing = 0; eax->split.num_threads_sharing = 0;
eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1; eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
if (assoc == 0xffff) if (assoc == 0xffff)
......
...@@ -1159,7 +1159,7 @@ static void mce_start_timer(unsigned long data) ...@@ -1159,7 +1159,7 @@ static void mce_start_timer(unsigned long data)
WARN_ON(smp_processor_id() != data); WARN_ON(smp_processor_id() != data);
if (mce_available(&current_cpu_data)) { if (mce_available(__this_cpu_ptr(&cpu_info))) {
machine_check_poll(MCP_TIMESTAMP, machine_check_poll(MCP_TIMESTAMP,
&__get_cpu_var(mce_poll_banks)); &__get_cpu_var(mce_poll_banks));
} }
...@@ -1767,7 +1767,7 @@ static int mce_shutdown(struct sys_device *dev) ...@@ -1767,7 +1767,7 @@ static int mce_shutdown(struct sys_device *dev)
static int mce_resume(struct sys_device *dev) static int mce_resume(struct sys_device *dev)
{ {
__mcheck_cpu_init_generic(); __mcheck_cpu_init_generic();
__mcheck_cpu_init_vendor(&current_cpu_data); __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
return 0; return 0;
} }
...@@ -1775,7 +1775,7 @@ static int mce_resume(struct sys_device *dev) ...@@ -1775,7 +1775,7 @@ static int mce_resume(struct sys_device *dev)
static void mce_cpu_restart(void *data) static void mce_cpu_restart(void *data)
{ {
del_timer_sync(&__get_cpu_var(mce_timer)); del_timer_sync(&__get_cpu_var(mce_timer));
if (!mce_available(&current_cpu_data)) if (!mce_available(__this_cpu_ptr(&cpu_info)))
return; return;
__mcheck_cpu_init_generic(); __mcheck_cpu_init_generic();
__mcheck_cpu_init_timer(); __mcheck_cpu_init_timer();
...@@ -1790,7 +1790,7 @@ static void mce_restart(void) ...@@ -1790,7 +1790,7 @@ static void mce_restart(void)
/* Toggle features for corrected errors */ /* Toggle features for corrected errors */
static void mce_disable_ce(void *all) static void mce_disable_ce(void *all)
{ {
if (!mce_available(&current_cpu_data)) if (!mce_available(__this_cpu_ptr(&cpu_info)))
return; return;
if (all) if (all)
del_timer_sync(&__get_cpu_var(mce_timer)); del_timer_sync(&__get_cpu_var(mce_timer));
...@@ -1799,7 +1799,7 @@ static void mce_disable_ce(void *all) ...@@ -1799,7 +1799,7 @@ static void mce_disable_ce(void *all)
static void mce_enable_ce(void *all) static void mce_enable_ce(void *all)
{ {
if (!mce_available(&current_cpu_data)) if (!mce_available(__this_cpu_ptr(&cpu_info)))
return; return;
cmci_reenable(); cmci_reenable();
cmci_recheck(); cmci_recheck();
...@@ -2022,7 +2022,7 @@ static void __cpuinit mce_disable_cpu(void *h) ...@@ -2022,7 +2022,7 @@ static void __cpuinit mce_disable_cpu(void *h)
unsigned long action = *(unsigned long *)h; unsigned long action = *(unsigned long *)h;
int i; int i;
if (!mce_available(&current_cpu_data)) if (!mce_available(__this_cpu_ptr(&cpu_info)))
return; return;
if (!(action & CPU_TASKS_FROZEN)) if (!(action & CPU_TASKS_FROZEN))
...@@ -2040,7 +2040,7 @@ static void __cpuinit mce_reenable_cpu(void *h) ...@@ -2040,7 +2040,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
unsigned long action = *(unsigned long *)h; unsigned long action = *(unsigned long *)h;
int i; int i;
if (!mce_available(&current_cpu_data)) if (!mce_available(__this_cpu_ptr(&cpu_info)))
return; return;
if (!(action & CPU_TASKS_FROZEN)) if (!(action & CPU_TASKS_FROZEN))
......
...@@ -130,7 +130,7 @@ void cmci_recheck(void) ...@@ -130,7 +130,7 @@ void cmci_recheck(void)
unsigned long flags; unsigned long flags;
int banks; int banks;
if (!mce_available(&current_cpu_data) || !cmci_supported(&banks)) if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
return; return;
local_irq_save(flags); local_irq_save(flags);
machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
......
...@@ -445,7 +445,7 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx) ...@@ -445,7 +445,7 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{ {
trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id()); trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
if (!need_resched()) { if (!need_resched()) {
if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
clflush((void *)&current_thread_info()->flags); clflush((void *)&current_thread_info()->flags);
__monitor((void *)&current_thread_info()->flags, 0, 0); __monitor((void *)&current_thread_info()->flags, 0, 0);
...@@ -460,7 +460,7 @@ static void mwait_idle(void) ...@@ -460,7 +460,7 @@ static void mwait_idle(void)
{ {
if (!need_resched()) { if (!need_resched()) {
trace_power_start(POWER_CSTATE, 1, smp_processor_id()); trace_power_start(POWER_CSTATE, 1, smp_processor_id());
if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR)) if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
clflush((void *)&current_thread_info()->flags); clflush((void *)&current_thread_info()->flags);
__monitor((void *)&current_thread_info()->flags, 0, 0); __monitor((void *)&current_thread_info()->flags, 0, 0);
......
...@@ -430,7 +430,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) ...@@ -430,7 +430,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
cpumask_set_cpu(cpu, c->llc_shared_map); cpumask_set_cpu(cpu, c->llc_shared_map);
if (current_cpu_data.x86_max_cores == 1) { if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
c->booted_cores = 1; c->booted_cores = 1;
return; return;
...@@ -1094,7 +1094,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) ...@@ -1094,7 +1094,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
preempt_disable(); preempt_disable();
smp_cpu_index_default(); smp_cpu_index_default();
current_cpu_data = boot_cpu_data; memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info));
cpumask_copy(cpu_callin_mask, cpumask_of(0)); cpumask_copy(cpu_callin_mask, cpumask_of(0));
mb(); mb();
/* /*
...@@ -1397,11 +1397,11 @@ static inline void mwait_play_dead(void) ...@@ -1397,11 +1397,11 @@ static inline void mwait_play_dead(void)
int i; int i;
void *mwait_ptr; void *mwait_ptr;
if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT)) if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT))
return; return;
if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH)) if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
return; return;
if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
return; return;
eax = CPUID_MWAIT_LEAF; eax = CPUID_MWAIT_LEAF;
...@@ -1452,7 +1452,7 @@ static inline void mwait_play_dead(void) ...@@ -1452,7 +1452,7 @@ static inline void mwait_play_dead(void)
static inline void hlt_play_dead(void) static inline void hlt_play_dead(void)
{ {
if (current_cpu_data.x86 >= 4) if (__this_cpu_read(cpu_info.x86) >= 4)
wbinvd(); wbinvd();
while (1) { while (1) {
......
...@@ -95,8 +95,8 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model, ...@@ -95,8 +95,8 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
* counter width: * counter width:
*/ */
if (!(eax.split.version_id == 0 && if (!(eax.split.version_id == 0 &&
current_cpu_data.x86 == 6 && __this_cpu_read(cpu_info.x86) == 6 &&
current_cpu_data.x86_model == 15)) { __this_cpu_read(cpu_info.x86_model) == 15)) {
if (counter_width < eax.split.bit_width) if (counter_width < eax.split.bit_width)
counter_width = eax.split.bit_width; counter_width = eax.split.bit_width;
...@@ -235,8 +235,8 @@ static void arch_perfmon_setup_counters(void) ...@@ -235,8 +235,8 @@ static void arch_perfmon_setup_counters(void)
eax.full = cpuid_eax(0xa); eax.full = cpuid_eax(0xa);
/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */ /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 && if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
current_cpu_data.x86_model == 15) { __this_cpu_read(cpu_info.x86_model) == 15) {
eax.split.version_id = 2; eax.split.version_id = 2;
eax.split.num_counters = 2; eax.split.num_counters = 2;
eax.split.bit_width = 40; eax.split.bit_width = 40;
......
...@@ -377,7 +377,7 @@ static int init_timing_params(unsigned int new_duty_cycle, ...@@ -377,7 +377,7 @@ static int init_timing_params(unsigned int new_duty_cycle,
duty_cycle = new_duty_cycle; duty_cycle = new_duty_cycle;
freq = new_freq; freq = new_freq;
loops_per_sec = current_cpu_data.loops_per_jiffy; loops_per_sec = __this_cpu_read(cpu_info.loops_per_jiffy);
loops_per_sec *= HZ; loops_per_sec *= HZ;
/* How many clocks in a microsecond?, avoiding long long divide */ /* How many clocks in a microsecond?, avoiding long long divide */
...@@ -398,7 +398,7 @@ static int init_timing_params(unsigned int new_duty_cycle, ...@@ -398,7 +398,7 @@ static int init_timing_params(unsigned int new_duty_cycle,
dprintk("in init_timing_params, freq=%d, duty_cycle=%d, " dprintk("in init_timing_params, freq=%d, duty_cycle=%d, "
"clk/jiffy=%ld, pulse=%ld, space=%ld, " "clk/jiffy=%ld, pulse=%ld, space=%ld, "
"conv_us_to_clocks=%ld\n", "conv_us_to_clocks=%ld\n",
freq, duty_cycle, current_cpu_data.loops_per_jiffy, freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy),
pulse_width, space_width, conv_us_to_clocks); pulse_width, space_width, conv_us_to_clocks);
return 0; return 0;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册