Commit 245b2e70 authored by: Tejun Heo

percpu: clean up percpu variable definitions

Percpu variable definition is about to be updated such that all percpu
symbols including the static ones must be unique.  Update percpu
variable definitions accordingly.

* as,cfq: rename ioc_count uniquely

* cpufreq: rename cpu_dbs_info uniquely

* xen: move nesting_count out of xen_evtchn_do_upcall() and rename it

* mm: move ratelimits out of balance_dirty_pages_ratelimited_nr() and
  rename it

* ipv4,6: rename cookie_scratch uniquely

* x86 perf_counter: rename prev_left to pmc_prev_left, irq_entry to
  pmc_irq_entry and nmi_entry to pmc_nmi_entry

* perf_counter: rename disable_count to perf_disable_count

* ftrace: rename test_event_disable to ftrace_test_event_disable

* kmemleak: rename test_pointer to kmemleak_test_pointer

* mce: rename next_interval to mce_next_interval

[ Impact: percpu usage cleanups, no duplicate static percpu var names ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: linux-mm <linux-mm@kvack.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Andi Kleen <andi@firstfloor.org>
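
For readers unfamiliar with the pattern, here is a minimal sketch of what every hunk below does (hypothetical "foo" subsystem and foo_poll_interval variable, not part of this patch): the static percpu definition gains a subsystem-specific prefix so its symbol stays unique once static and dynamic percpu symbols share a single namespace, and each accessor is updated to the new name; the storage and access semantics are unchanged.

/*
 * Sketch only: foo_poll_interval is a made-up name illustrating the
 * rename pattern; compare next_interval -> mce_next_interval below.
 */
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>

/* before: static DEFINE_PER_CPU(int, poll_interval);  -- short name may
 * collide with another static percpu variable elsewhere */
static DEFINE_PER_CPU(int, foo_poll_interval);  /* after: prefixed, unique */

static void foo_tick(void)
{
        /* __get_cpu_var() still yields this CPU's copy, by the new name */
        int *n = &__get_cpu_var(foo_poll_interval);

        *n = max(*n / 2, HZ / 100);
}

static void foo_init_cpu(int cpu)
{
        /* per_cpu() still addresses a specific CPU's copy, by the new name */
        per_cpu(foo_poll_interval, cpu) = 5 * 60 * HZ;
}
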
Parent b9bf3121
@@ -1091,7 +1091,7 @@ void mce_log_therm_throt_event(__u64 status)
  */
 static int check_interval = 5 * 60; /* 5 minutes */
-static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
+static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 static void mcheck_timer(unsigned long data)
@@ -1110,7 +1110,7 @@ static void mcheck_timer(unsigned long data)
          * Alert userspace if needed. If we logged an MCE, reduce the
          * polling interval, otherwise increase the polling interval.
          */
-        n = &__get_cpu_var(next_interval);
+        n = &__get_cpu_var(mce_next_interval);
         if (mce_notify_irq())
                 *n = max(*n/2, HZ/100);
         else
@@ -1311,7 +1311,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
 static void mce_init_timer(void)
 {
         struct timer_list *t = &__get_cpu_var(mce_timer);
-        int *n = &__get_cpu_var(next_interval);
+        int *n = &__get_cpu_var(mce_next_interval);
         if (mce_ignore_ce)
                 return;
@@ -1914,7 +1914,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
         case CPU_DOWN_FAILED:
         case CPU_DOWN_FAILED_FROZEN:
                 t->expires = round_jiffies(jiffies +
-                                        __get_cpu_var(next_interval));
+                                        __get_cpu_var(mce_next_interval));
                 add_timer_on(t, cpu);
                 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
                 break;
...
@@ -862,7 +862,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
         x86_pmu_disable_counter(hwc, idx);
 }
-static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], prev_left);
+static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
 /*
  * Set the next IRQ period, based on the hwc->period_left value.
@@ -901,7 +901,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
         if (left > x86_pmu.max_period)
                 left = x86_pmu.max_period;
-        per_cpu(prev_left[idx], smp_processor_id()) = left;
+        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
         /*
          * The hw counter starts counting from this counter offset,
@@ -1089,7 +1089,7 @@ void perf_counter_print_debug(void)
                 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
                 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
-                prev_left = per_cpu(prev_left[idx], cpu);
+                prev_left = per_cpu(pmc_prev_left[idx], cpu);
                 pr_info("CPU#%d: gen-PMC%d ctrl:  %016llx\n",
                         cpu, idx, pmc_ctrl);
@@ -1561,8 +1561,8 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
                 entry->ip[entry->nr++] = ip;
 }
-static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
 static void
@@ -1709,9 +1709,9 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
         struct perf_callchain_entry *entry;
         if (in_nmi())
-                entry = &__get_cpu_var(nmi_entry);
+                entry = &__get_cpu_var(pmc_nmi_entry);
         else
-                entry = &__get_cpu_var(irq_entry);
+                entry = &__get_cpu_var(pmc_irq_entry);
         entry->nr = 0;
...
@@ -146,7 +146,7 @@ enum arq_state {
 #define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
 #define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, as_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
@@ -161,7 +161,7 @@ static void as_antic_stop(struct as_data *ad);
 static void free_as_io_context(struct as_io_context *aic)
 {
         kfree(aic);
-        elv_ioc_count_dec(ioc_count);
+        elv_ioc_count_dec(as_ioc_count);
         if (ioc_gone) {
                 /*
                  * AS scheduler is exiting, grab exit lock and check
@@ -169,7 +169,7 @@ static void free_as_io_context(struct as_io_context *aic)
                  * complete ioc_gone and set it back to NULL.
                  */
                 spin_lock(&ioc_gone_lock);
-                if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+                if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) {
                         complete(ioc_gone);
                         ioc_gone = NULL;
                 }
@@ -211,7 +211,7 @@ static struct as_io_context *alloc_as_io_context(void)
                 ret->seek_total = 0;
                 ret->seek_samples = 0;
                 ret->seek_mean = 0;
-                elv_ioc_count_inc(ioc_count);
+                elv_ioc_count_inc(as_ioc_count);
         }
         return ret;
@@ -1507,7 +1507,7 @@ static void __exit as_exit(void)
         ioc_gone = &all_gone;
         /* ioc_gone's update must be visible before reading ioc_count */
         smp_wmb();
-        if (elv_ioc_count_read(ioc_count))
+        if (elv_ioc_count_read(as_ioc_count))
                 wait_for_completion(&all_gone);
         synchronize_rcu();
 }
...
@@ -48,7 +48,7 @@ static int cfq_slice_idle = HZ / 125;
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
@@ -1422,7 +1422,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
         cic = container_of(head, struct cfq_io_context, rcu_head);
         kmem_cache_free(cfq_ioc_pool, cic);
-        elv_ioc_count_dec(ioc_count);
+        elv_ioc_count_dec(cfq_ioc_count);
         if (ioc_gone) {
                 /*
@@ -1431,7 +1431,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
                  * complete ioc_gone and set it back to NULL
                  */
                 spin_lock(&ioc_gone_lock);
-                if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+                if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
                         complete(ioc_gone);
                         ioc_gone = NULL;
                 }
@@ -1557,7 +1557,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
                 INIT_HLIST_NODE(&cic->cic_list);
                 cic->dtor = cfq_free_io_context;
                 cic->exit = cfq_exit_io_context;
-                elv_ioc_count_inc(ioc_count);
+                elv_ioc_count_inc(cfq_ioc_count);
         }
         return cic;
@@ -2658,7 +2658,7 @@ static void __exit cfq_exit(void)
          * this also protects us from entering cfq_slab_kill() with
          * pending RCU callbacks
          */
-        if (elv_ioc_count_read(ioc_count))
+        if (elv_ioc_count_read(cfq_ioc_count))
                 wait_for_completion(&all_gone);
         cfq_slab_kill();
 }
...
@@ -65,7 +65,7 @@ struct cpu_dbs_info_s {
         int cpu;
         unsigned int enable:1;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
@@ -138,7 +138,7 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                                      void *data)
 {
         struct cpufreq_freqs *freq = data;
-        struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+        struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
                                                         freq->cpu);
         struct cpufreq_policy *policy;
@@ -298,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
         /* we need to re-evaluate prev_cpu_idle */
         for_each_online_cpu(j) {
                 struct cpu_dbs_info_s *dbs_info;
-                dbs_info = &per_cpu(cpu_dbs_info, j);
+                dbs_info = &per_cpu(cs_cpu_dbs_info, j);
                 dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                 &dbs_info->prev_cpu_wall);
                 if (dbs_tuners_ins.ignore_nice)
@@ -388,7 +388,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
                 cputime64_t cur_wall_time, cur_idle_time;
                 unsigned int idle_time, wall_time;
-                j_dbs_info = &per_cpu(cpu_dbs_info, j);
+                j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
                 cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
@@ -528,7 +528,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
         unsigned int j;
         int rc;
-        this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+        this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
         switch (event) {
         case CPUFREQ_GOV_START:
@@ -548,7 +548,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 for_each_cpu(j, policy->cpus) {
                         struct cpu_dbs_info_s *j_dbs_info;
-                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
+                        j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
                         j_dbs_info->cur_policy = policy;
                         j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
...
@@ -73,7 +73,7 @@ struct cpu_dbs_info_s {
         unsigned int enable:1,
                      sample_type:1;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
@@ -151,7 +151,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
         unsigned int freq_hi, freq_lo;
         unsigned int index = 0;
         unsigned int jiffies_total, jiffies_hi, jiffies_lo;
-        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+        struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+                                                   policy->cpu);
         if (!dbs_info->freq_table) {
                 dbs_info->freq_lo = 0;
@@ -196,7 +197,7 @@ static void ondemand_powersave_bias_init(void)
 {
         int i;
         for_each_online_cpu(i) {
-                struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
+                struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, i);
                 dbs_info->freq_table = cpufreq_frequency_get_table(i);
                 dbs_info->freq_lo = 0;
         }
@@ -297,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
         /* we need to re-evaluate prev_cpu_idle */
         for_each_online_cpu(j) {
                 struct cpu_dbs_info_s *dbs_info;
-                dbs_info = &per_cpu(cpu_dbs_info, j);
+                dbs_info = &per_cpu(od_cpu_dbs_info, j);
                 dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                                                 &dbs_info->prev_cpu_wall);
                 if (dbs_tuners_ins.ignore_nice)
@@ -391,7 +392,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
                 unsigned int load, load_freq;
                 int freq_avg;
-                j_dbs_info = &per_cpu(cpu_dbs_info, j);
+                j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
                 cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
@@ -548,7 +549,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
         unsigned int j;
         int rc;
-        this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+        this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
         switch (event) {
         case CPUFREQ_GOV_START:
@@ -570,7 +571,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 for_each_cpu(j, policy->cpus) {
                         struct cpu_dbs_info_s *j_dbs_info;
-                        j_dbs_info = &per_cpu(cpu_dbs_info, j);
+                        j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
                         j_dbs_info->cur_policy = policy;
                         j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
...
@@ -602,6 +602,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
         return IRQ_HANDLED;
 }
+static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+
 /*
  * Search the CPUs pending events bitmasks.  For each one found, map
  * the event number to an irq, and feed it into do_IRQ() for
@@ -617,7 +619,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
         struct pt_regs *old_regs = set_irq_regs(regs);
         struct shared_info *s = HYPERVISOR_shared_info;
         struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
-        static DEFINE_PER_CPU(unsigned, nesting_count);
         unsigned count;
         exit_idle();
@@ -628,7 +629,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
                 vcpu_info->evtchn_upcall_pending = 0;
-                if (__get_cpu_var(nesting_count)++)
+                if (__get_cpu_var(xed_nesting_count)++)
                         goto out;
 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
@@ -653,8 +654,8 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
                 BUG_ON(!irqs_disabled());
-                count = __get_cpu_var(nesting_count);
-                __get_cpu_var(nesting_count) = 0;
+                count = __get_cpu_var(xed_nesting_count);
+                __get_cpu_var(xed_nesting_count) = 0;
         } while(count != 1);
 out:
...
@@ -98,16 +98,16 @@ hw_perf_group_sched_in(struct perf_counter *group_leader,
 void __weak perf_counter_print_debug(void)	{ }
-static DEFINE_PER_CPU(int, disable_count);
+static DEFINE_PER_CPU(int, perf_disable_count);
 void __perf_disable(void)
 {
-        __get_cpu_var(disable_count)++;
+        __get_cpu_var(perf_disable_count)++;
 }
 bool __perf_enable(void)
 {
-        return !--__get_cpu_var(disable_count);
+        return !--__get_cpu_var(perf_disable_count);
 }
 void perf_disable(void)
...
@@ -1318,7 +1318,7 @@ static __init void event_trace_self_tests(void)
 #ifdef CONFIG_FUNCTION_TRACER
-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 static void
 function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1334,7 +1334,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
         pc = preempt_count();
         resched = ftrace_preempt_disable();
         cpu = raw_smp_processor_id();
-        disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+        disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
         if (disabled != 1)
                 goto out;
@@ -1352,7 +1352,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
         trace_nowake_buffer_unlock_commit(event, flags, pc);
  out:
-        atomic_dec(&per_cpu(test_event_disable, cpu));
+        atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
         ftrace_preempt_enable(resched);
 }
...
@@ -36,7 +36,7 @@ struct test_node {
 };
 static LIST_HEAD(test_list);
-static DEFINE_PER_CPU(void *, test_pointer);
+static DEFINE_PER_CPU(void *, kmemleak_test_pointer);
 /*
  * Some very simple testing. This function needs to be extended for
@@ -86,9 +86,9 @@ static int __init kmemleak_test_init(void)
         }
         for_each_possible_cpu(i) {
-                per_cpu(test_pointer, i) = kmalloc(129, GFP_KERNEL);
+                per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL);
                 pr_info("kmemleak: kmalloc(129) = %p\n",
-                        per_cpu(test_pointer, i));
+                        per_cpu(kmemleak_test_pointer, i));
         }
         return 0;
...
@@ -607,6 +607,8 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
         }
 }
+static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
+
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
@@ -624,7 +626,6 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                                         unsigned long nr_pages_dirtied)
 {
-        static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
         unsigned long ratelimit;
         unsigned long *p;
@@ -637,7 +638,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
          * tasks in balance_dirty_pages(). Period.
          */
         preempt_disable();
-        p = &__get_cpu_var(ratelimits);
+        p = &__get_cpu_var(bdp_ratelimits);
         *p += nr_pages_dirtied;
         if (unlikely(*p >= ratelimit)) {
                 *p = 0;
...
@@ -37,12 +37,13 @@ __initcall(init_syncookies);
 #define COOKIEBITS 24	/* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
-static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], cookie_scratch);
+static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
+                      ipv4_cookie_scratch);
 static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
                        u32 count, int c)
 {
-        __u32 *tmp = __get_cpu_var(cookie_scratch);
+        __u32 *tmp = __get_cpu_var(ipv4_cookie_scratch);
         memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
         tmp[0] = (__force u32)saddr;
...
@@ -74,12 +74,13 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
         return child;
 }
-static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], cookie_scratch);
+static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
+                      ipv6_cookie_scratch);
 static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
                        __be16 sport, __be16 dport, u32 count, int c)
 {
-        __u32 *tmp = __get_cpu_var(cookie_scratch);
+        __u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
         /*
          * we have 320 bits of information to hash, copy in the remaining
...