Commit 22127e93, authored by Christoph Lameter, committed by Tejun Heo

time: Replace __get_cpu_var uses

Convert uses of __get_cpu_var for creating an address from a percpu
offset to this_cpu_ptr.

The two cases where get_cpu_var is used to actually access a percpu
variable are changed to use this_cpu_read/raw_cpu_read.
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Parent bb964a92
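As background for the conversion described in the commit message, here is a minimal, self-contained sketch of the before/after accessor pattern. The per-CPU variables (example_state, example_count) are hypothetical and exist only for illustration; the accessors themselves (this_cpu_ptr(), this_cpu_read(), raw_cpu_ptr()/raw_cpu_read()) are the kernel percpu APIs this patch switches to.

#include <linux/percpu.h>

struct example_state {
	unsigned long hits;
};

/* Hypothetical per-CPU variables, not part of this patch. */
static DEFINE_PER_CPU(struct example_state, example_state);
static DEFINE_PER_CPU(unsigned long, example_count);

static void example_accessors(void)
{
	/*
	 * Old idiom: __get_cpu_var() yields the per-CPU object itself,
	 * so a pointer is built by taking its address:
	 *
	 *	struct example_state *st = &__get_cpu_var(example_state);
	 *	unsigned long c = __get_cpu_var(example_count);
	 */

	/* New idiom: this_cpu_ptr() returns the pointer directly ... */
	struct example_state *st = this_cpu_ptr(&example_state);

	/* ... and plain value reads use this_cpu_read()/raw_cpu_read(). */
	unsigned long c = this_cpu_read(example_count);

	st->hits += c;
}

The raw_cpu_ptr()/raw_cpu_read() variants used in a few hunks below are, as I understand them, the forms without the preemption sanity checks, for call sites where any CPU's instance is acceptable (e.g. timer initialization).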
@@ -28,7 +28,7 @@ static void dummy_timer_set_mode(enum clock_event_mode mode,
 static void dummy_timer_setup(void)
 {
 	int cpu = smp_processor_id();
-	struct clock_event_device *evt = __this_cpu_ptr(&dummy_timer_evt);
+	struct clock_event_device *evt = raw_cpu_ptr(&dummy_timer_evt);
 
 	evt->name = "dummy_timer";
 	evt->features = CLOCK_EVT_FEAT_PERIODIC |
...
@@ -95,11 +95,11 @@ bool irq_work_queue(struct irq_work *work)
 	/* If the work is "lazy", handle it from next tick if any */
 	if (work->flags & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
+		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
 		    tick_nohz_tick_stopped())
 			arch_irq_work_raise();
 	} else {
-		if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
+		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
 			arch_irq_work_raise();
 	}
@@ -113,8 +113,8 @@ bool irq_work_needs_cpu(void)
 {
 	struct llist_head *raised, *lazy;
 
-	raised = &__get_cpu_var(raised_list);
-	lazy = &__get_cpu_var(lazy_list);
+	raised = this_cpu_ptr(&raised_list);
+	lazy = this_cpu_ptr(&lazy_list);
 
 	if (llist_empty(raised) && llist_empty(lazy))
 		return false;
@@ -166,8 +166,8 @@ static void irq_work_run_list(struct llist_head *list)
  */
 void irq_work_run(void)
 {
-	irq_work_run_list(&__get_cpu_var(raised_list));
-	irq_work_run_list(&__get_cpu_var(lazy_list));
+	irq_work_run_list(this_cpu_ptr(&raised_list));
+	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
...
@@ -134,7 +134,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
 static inline struct sched_clock_data *this_scd(void)
 {
-	return &__get_cpu_var(sched_clock_data);
+	return this_cpu_ptr(&sched_clock_data);
 }
 
 static inline struct sched_clock_data *cpu_sdc(int cpu)
...
@@ -485,7 +485,7 @@ static void tasklet_action(struct softirq_action *a)
 	local_irq_disable();
 	list = __this_cpu_read(tasklet_vec.head);
 	__this_cpu_write(tasklet_vec.head, NULL);
-	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
+	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
 	local_irq_enable();
 
 	while (list) {
@@ -521,7 +521,7 @@ static void tasklet_hi_action(struct softirq_action *a)
 	local_irq_disable();
 	list = __this_cpu_read(tasklet_hi_vec.head);
 	__this_cpu_write(tasklet_hi_vec.head, NULL);
-	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
+	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
 	local_irq_enable();
 
 	while (list) {
...
@@ -1144,7 +1144,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 	memset(timer, 0, sizeof(struct hrtimer));
 
-	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+	cpu_base = raw_cpu_ptr(&hrtimer_bases);
 
 	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
 		clock_id = CLOCK_MONOTONIC;
@@ -1187,7 +1187,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 	struct hrtimer_cpu_base *cpu_base;
 	int base = hrtimer_clockid_to_base(which_clock);
 
-	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+	cpu_base = raw_cpu_ptr(&hrtimer_bases);
 	*tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);
 
 	return 0;
@@ -1376,7 +1376,7 @@ static void __hrtimer_peek_ahead_timers(void)
 	if (!hrtimer_hres_active())
 		return;
 
-	td = &__get_cpu_var(tick_cpu_device);
+	td = this_cpu_ptr(&tick_cpu_device);
 	if (td && td->evtdev)
 		hrtimer_interrupt(td->evtdev);
 }
...
@@ -554,7 +554,7 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 void tick_check_oneshot_broadcast_this_cpu(void)
 {
 	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
-		struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 
 		/*
 		 * We might be in the middle of switching over from
...
@@ -224,7 +224,7 @@ static void tick_setup_device(struct tick_device *td,
 void tick_install_replacement(struct clock_event_device *newdev)
 {
-	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 	int cpu = smp_processor_id();
 
 	clockevents_exchange_device(td->evtdev, newdev);
@@ -374,14 +374,14 @@ void tick_shutdown(unsigned int *cpup)
 void tick_suspend(void)
 {
-	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 
 	clockevents_shutdown(td->evtdev);
 }
 
 void tick_resume(void)
 {
-	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 	int broadcast = tick_resume_broadcast();
 
 	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
...
@@ -59,7 +59,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
  */
 int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
 {
-	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 	struct clock_event_device *dev = td->evtdev;
 
 	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||
...
@@ -205,7 +205,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
  */
 void __tick_nohz_full_check(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	if (tick_nohz_full_cpu(smp_processor_id())) {
 		if (ts->tick_stopped && !is_idle_task(current)) {
@@ -545,7 +545,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
 	ktime_t last_update, expires, ret = { .tv64 = 0 };
 	unsigned long rcu_delta_jiffies;
-	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 	u64 time_delta;
 
 	time_delta = timekeeping_max_deferment();
@@ -813,7 +813,7 @@ void tick_nohz_idle_enter(void)
 	local_irq_disable();
 
-	ts = &__get_cpu_var(tick_cpu_sched);
+	ts = this_cpu_ptr(&tick_cpu_sched);
 	ts->inidle = 1;
 	__tick_nohz_idle_enter(ts);
@@ -831,7 +831,7 @@ EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
  */
 void tick_nohz_irq_exit(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	if (ts->inidle)
 		__tick_nohz_idle_enter(ts);
@@ -846,7 +846,7 @@ void tick_nohz_irq_exit(void)
  */
 ktime_t tick_nohz_get_sleep_length(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	return ts->sleep_length;
 }
@@ -959,7 +959,7 @@ static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
  */
 static void tick_nohz_handler(struct clock_event_device *dev)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
@@ -979,7 +979,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
  */
 static void tick_nohz_switch_to_nohz(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t next;
 
 	if (!tick_nohz_enabled)
@@ -1115,7 +1115,7 @@ early_param("skew_tick", skew_tick);
  */
 void tick_setup_sched_timer(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t now = ktime_get();
 
 	/*
@@ -1184,7 +1184,7 @@ void tick_clock_notify(void)
  */
 void tick_oneshot_notify(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	set_bit(0, &ts->check_clocks);
 }
@@ -1199,7 +1199,7 @@ void tick_oneshot_notify(void)
  */
 int tick_check_oneshot_change(int allow_nohz)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	if (!test_and_clear_bit(0, &ts->check_clocks))
 		return 0;
...
@@ -655,7 +655,7 @@ static inline void debug_assert_init(struct timer_list *timer)
 static void do_init_timer(struct timer_list *timer, unsigned int flags,
 			  const char *name, struct lock_class_key *key)
 {
-	struct tvec_base *base = __raw_get_cpu_var(tvec_bases);
+	struct tvec_base *base = raw_cpu_read(tvec_bases);
 
 	timer->entry.next = NULL;
 	timer->base = (void *)((unsigned long)base | flags);
...