Commit c6ae41e7 authored by Alex Shi, committed by Tejun Heo

x86: replace percpu_xxx funcs with this_cpu_xxx

The percpu_xxx() family of functions duplicates this_cpu_xxx(). Replace
the percpu_xxx() calls in the code with their this_cpu_xxx()
equivalents. There is no functional change in this patch; it is only
preparation for removing the percpu_xxx() functions later.

On x86 the this_cpu_xxx() functions are the same as __this_cpu_xxx():
the gs-segment-prefixed access is a single instruction, so it is already
safe against preemption and needs no preempt enable/disable pair.
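
To illustrate the difference (a simplified sketch, not part of this
patch; the real definitions are the generic this_cpu helpers in
include/linux/percpu.h and percpu_from_op() in
arch/x86/include/asm/percpu.h):

	/*
	 * Generic fallback (simplified): preemption must be disabled so
	 * the task cannot migrate between computing the per-cpu address
	 * and performing the load.
	 */
	#define this_cpu_read(pcp)				\
	({	typeof(pcp) ret__;				\
		preempt_disable();				\
		ret__ = *this_cpu_ptr(&(pcp));			\
		preempt_enable();				\
		ret__;						\
	})

	/*
	 * x86 (simplified): the whole read is a single gs-prefixed mov,
	 * which cannot be torn by preemption, so no preempt_disable()/
	 * preempt_enable() pair is needed and this_cpu_read() can be
	 * the same as __this_cpu_read().
	 */
	#define this_cpu_read(var)	percpu_from_op("mov", var, "m" (var))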

Thanks to Stephen Rothwell, who found and fixed an i386 build error in
the patch.

Thanks also to Andrew Morton, who kept updating the patchset against
Linus' tree.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Acked-by: Christoph Lameter <cl@gentwo.org>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
Parent 19e8d69c
@@ -229,7 +229,7 @@ static inline void __user *arch_compat_alloc_user_space(long len)
 		sp = task_pt_regs(current)->sp;
 	} else {
 		/* -128 for the x32 ABI redzone */
-		sp = percpu_read(old_rsp) - 128;
+		sp = this_cpu_read(old_rsp) - 128;
 	}
 
 	return (void __user *)round_down(sp - len, 16);
...
@@ -11,7 +11,7 @@ DECLARE_PER_CPU(struct task_struct *, current_task);
 
 static __always_inline struct task_struct *get_current(void)
 {
-	return percpu_read_stable(current_task);
+	return this_cpu_read_stable(current_task);
 }
 
 #define current get_current()
...
@@ -6,6 +6,7 @@
 #include <asm/mmu.h>
 #include <linux/smp.h>
+#include <linux/percpu.h>
 
 static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
 {
...
@@ -290,14 +290,14 @@ static inline int __thread_has_fpu(struct task_struct *tsk)
 static inline void __thread_clear_has_fpu(struct task_struct *tsk)
 {
 	tsk->thread.fpu.has_fpu = 0;
-	percpu_write(fpu_owner_task, NULL);
+	this_cpu_write(fpu_owner_task, NULL);
 }
 
 /* Must be paired with a 'clts' before! */
 static inline void __thread_set_has_fpu(struct task_struct *tsk)
 {
 	tsk->thread.fpu.has_fpu = 1;
-	percpu_write(fpu_owner_task, tsk);
+	this_cpu_write(fpu_owner_task, tsk);
 }
 
 /*
@@ -344,7 +344,7 @@ typedef struct { int preload; } fpu_switch_t;
  */
 static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
 {
-	return new == percpu_read_stable(fpu_owner_task) &&
+	return new == this_cpu_read_stable(fpu_owner_task) &&
 		cpu == new->thread.fpu.last_cpu;
 }
...
@@ -35,14 +35,15 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 #define __ARCH_IRQ_STAT
 
-#define inc_irq_stat(member)	percpu_inc(irq_stat.member)
+#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)
 
-#define local_softirq_pending()	percpu_read(irq_stat.__softirq_pending)
+#define local_softirq_pending()	this_cpu_read(irq_stat.__softirq_pending)
 
 #define __ARCH_SET_SOFTIRQ_PENDING
 
-#define set_softirq_pending(x)	percpu_write(irq_stat.__softirq_pending, (x))
-#define or_softirq_pending(x)	percpu_or(irq_stat.__softirq_pending, (x))
+#define set_softirq_pending(x)	\
+		this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)	this_cpu_or(irq_stat.__softirq_pending, (x))
 
 extern void ack_bad_irq(unsigned int irq);
...
@@ -15,7 +15,7 @@ DECLARE_PER_CPU(struct pt_regs *, irq_regs);
 
 static inline struct pt_regs *get_irq_regs(void)
 {
-	return percpu_read(irq_regs);
+	return this_cpu_read(irq_regs);
 }
 
 static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
@@ -23,7 +23,7 @@ static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
 	struct pt_regs *old_regs;
 
 	old_regs = get_irq_regs();
-	percpu_write(irq_regs, new_regs);
+	this_cpu_write(irq_regs, new_regs);
 
 	return old_regs;
 }
...
@@ -25,8 +25,8 @@ void destroy_context(struct mm_struct *mm);
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
-	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
-		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
 #endif
 }
 
@@ -37,8 +37,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 	if (likely(prev != next)) {
 #ifdef CONFIG_SMP
-		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		percpu_write(cpu_tlbstate.active_mm, next);
+		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		this_cpu_write(cpu_tlbstate.active_mm, next);
 #endif
 		cpumask_set_cpu(cpu, mm_cpumask(next));
 
@@ -56,8 +56,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	}
 #ifdef CONFIG_SMP
 	else {
-		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
 
 		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
 			/* We were in lazy tlb mode and leave_mm disabled
...
@@ -46,7 +46,7 @@
 
 #ifdef CONFIG_SMP
 #define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
-#define __my_cpu_offset		percpu_read(this_cpu_off)
+#define __my_cpu_offset		this_cpu_read(this_cpu_off)
 
 /*
  * Compared to the generic __my_cpu_offset version, the following
@@ -352,15 +352,15 @@ do {								\
 
 /*
  * percpu_read() makes gcc load the percpu variable every time it is
- * accessed while percpu_read_stable() allows the value to be cached.
- * percpu_read_stable() is more efficient and can be used if its value
+ * accessed while this_cpu_read_stable() allows the value to be cached.
+ * this_cpu_read_stable() is more efficient and can be used if its value
  * is guaranteed to be valid across cpus.  The current users include
  * get_current() and get_thread_info() both of which are actually
  * per-thread variables implemented as per-cpu variables and thus
  * stable for the duration of the respective task.
  */
 #define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
-#define percpu_read_stable(var)	percpu_from_op("mov", var, "p" (&(var)))
+#define this_cpu_read_stable(var)	percpu_from_op("mov", var, "p" (&(var)))
 #define percpu_write(var, val)		percpu_to_op("mov", var, val)
 #define percpu_add(var, val)		percpu_add_op(var, val)
 #define percpu_sub(var, val)		percpu_add_op(var, -(val))
...
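The "m" versus "p" constraints above are the entire difference between
the two read flavors: "m" (var) makes the per-cpu memory itself an asm
input, so gcc must re-issue the gs-relative load on every access, while
"p" (&(var)) passes only the address, leaving gcc free to cache the
loaded value across accesses. A minimal sketch of the idea (an
illustration only, assuming x86-64 and an 8-byte variable; the
hypothetical *_sketch names stand in for the size-dispatching
percpu_from_op()):

	/* "m": the variable is a memory operand; every use reloads. */
	#define my_cpu_read_sketch(var)					\
	({	typeof(var) val__;					\
		asm("movq %%gs:%1, %0" : "=r" (val__) : "m" (var));	\
		val__;							\
	})

	/*
	 * "p": only the address is an operand and no memory input is
	 * declared, so gcc may reuse a previously loaded value.  Safe
	 * only for values stable for the running task, e.g. current_task.
	 */
	#define my_cpu_read_stable_sketch(var)				\
	({	typeof(var) val__;					\
		asm("movq %%gs:%P1, %0" : "=r" (val__) : "p" (&(var)));	\
		val__;							\
	})
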
@@ -188,11 +188,11 @@ extern unsigned disabled_cpus __cpuinitdata;
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-#define raw_smp_processor_id() (percpu_read(cpu_number))
+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
 extern int safe_smp_processor_id(void);
 
 #elif defined(CONFIG_X86_64_SMP)
-#define raw_smp_processor_id() (percpu_read(cpu_number))
+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
 
 #define stack_smp_processor_id()				\
 ({								\
...
@@ -75,9 +75,9 @@ static __always_inline void boot_init_stack_canary(void)
 
 	current->stack_canary = canary;
 #ifdef CONFIG_X86_64
-	percpu_write(irq_stack_union.stack_canary, canary);
+	this_cpu_write(irq_stack_union.stack_canary, canary);
 #else
-	percpu_write(stack_canary.canary, canary);
+	this_cpu_write(stack_canary.canary, canary);
 #endif
 }
...
@@ -222,7 +222,7 @@ DECLARE_PER_CPU(unsigned long, kernel_stack);
 static inline struct thread_info *current_thread_info(void)
 {
 	struct thread_info *ti;
-	ti = (void *)(percpu_read_stable(kernel_stack) +
+	ti = (void *)(this_cpu_read_stable(kernel_stack) +
 		      KERNEL_STACK_OFFSET - THREAD_SIZE);
 	return ti;
 }
...
@@ -156,8 +156,8 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
 static inline void reset_lazy_tlbstate(void)
 {
-	percpu_write(cpu_tlbstate.state, 0);
-	percpu_write(cpu_tlbstate.active_mm, &init_mm);
+	this_cpu_write(cpu_tlbstate.state, 0);
+	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
 }
 
 #endif	/* SMP */
...
@@ -1185,7 +1185,7 @@ void __cpuinit cpu_init(void)
 	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
-	if (cpu != 0 && percpu_read(numa_node) == 0 &&
+	if (cpu != 0 && this_cpu_read(numa_node) == 0 &&
 	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
 		set_numa_node(early_cpu_to_node(cpu));
 #endif
...
@@ -583,7 +583,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 	struct mce m;
 	int i;
 
-	percpu_inc(mce_poll_count);
+	this_cpu_inc(mce_poll_count);
 
 	mce_gather_info(&m, NULL);
 
@@ -1015,7 +1015,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
 	atomic_inc(&mce_entry);
 
-	percpu_inc(mce_exception_count);
+	this_cpu_inc(mce_exception_count);
 
 	if (!banks)
 		goto out;
...
@@ -88,7 +88,7 @@ void kernel_fpu_begin(void)
 		__thread_clear_has_fpu(me);
 		/* We do 'stts()' in kernel_fpu_end() */
 	} else {
-		percpu_write(fpu_owner_task, NULL);
+		this_cpu_write(fpu_owner_task, NULL);
 		clts();
 	}
 }
...
@@ -13,6 +13,7 @@
 #include <linux/cpumask.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/percpu.h>
 
 #include <asm/apic.h>
 #include <asm/nmi.h>
...
@@ -241,16 +241,16 @@ static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LA
 
 static inline void enter_lazy(enum paravirt_lazy_mode mode)
 {
-	BUG_ON(percpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
+	BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
 
-	percpu_write(paravirt_lazy_mode, mode);
+	this_cpu_write(paravirt_lazy_mode, mode);
 }
 
 static void leave_lazy(enum paravirt_lazy_mode mode)
 {
-	BUG_ON(percpu_read(paravirt_lazy_mode) != mode);
+	BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);
 
-	percpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
+	this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
 }
 
 void paravirt_enter_lazy_mmu(void)
@@ -267,7 +267,7 @@ void paravirt_start_context_switch(struct task_struct *prev)
 {
 	BUG_ON(preemptible());
 
-	if (percpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
+	if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
 		arch_leave_lazy_mmu_mode();
 		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
 	}
@@ -289,7 +289,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 	if (in_interrupt())
 		return PARAVIRT_LAZY_NONE;
 
-	return percpu_read(paravirt_lazy_mode);
+	return this_cpu_read(paravirt_lazy_mode);
 }
 
 void arch_flush_lazy_mmu_mode(void)
...
@@ -377,7 +377,7 @@ static inline void play_dead(void)
 #ifdef CONFIG_X86_64
 void enter_idle(void)
 {
-	percpu_write(is_idle, 1);
+	this_cpu_write(is_idle, 1);
 	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
 }
...
@@ -302,7 +302,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	switch_fpu_finish(next_p, fpu);
 
-	percpu_write(current_task, next_p);
+	this_cpu_write(current_task, next_p);
 
 	return prev_p;
 }
...
@@ -237,7 +237,7 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
 	current->thread.usersp	= new_sp;
 	regs->ip		= new_ip;
 	regs->sp		= new_sp;
-	percpu_write(old_rsp, new_sp);
+	this_cpu_write(old_rsp, new_sp);
 	regs->cs		= _cs;
 	regs->ss		= _ss;
 	regs->flags		= X86_EFLAGS_IF;
@@ -359,11 +359,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/*
 	 * Switch the PDA and FPU contexts.
 	 */
-	prev->usersp = percpu_read(old_rsp);
-	percpu_write(old_rsp, next->usersp);
-	percpu_write(current_task, next_p);
+	prev->usersp = this_cpu_read(old_rsp);
+	this_cpu_write(old_rsp, next->usersp);
+	this_cpu_write(current_task, next_p);
 
-	percpu_write(kernel_stack,
+	this_cpu_write(kernel_stack,
 		  (unsigned long)task_stack_page(next_p) +
 		  THREAD_SIZE - KERNEL_STACK_OFFSET);
...
@@ -61,10 +61,10 @@ static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);
  */
 void leave_mm(int cpu)
 {
-	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
 	cpumask_clear_cpu(cpu,
-			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
+			  mm_cpumask(this_cpu_read(cpu_tlbstate.active_mm)));
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -152,8 +152,8 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 	 * BUG();
 	 */
 
-	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
-		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
+	if (f->flush_mm == this_cpu_read(cpu_tlbstate.active_mm)) {
+		if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
 			if (f->flush_va == TLB_FLUSH_ALL)
 				local_flush_tlb();
 			else
@@ -322,7 +322,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 static void do_flush_tlb_all(void *info)
 {
 	__flush_tlb_all();
-	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
+	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(smp_processor_id());
 }
...
@@ -239,7 +239,7 @@ static inline int cpu_to_node(int cpu)
 #ifndef set_numa_node
 static inline void set_numa_node(int node)
 {
-	percpu_write(numa_node, node);
+	this_cpu_write(numa_node, node);
 }
 #endif
@@ -274,7 +274,7 @@ DECLARE_PER_CPU(int, _numa_mem_);
 #ifndef set_numa_mem
 static inline void set_numa_mem(int node)
 {
-	percpu_write(_numa_mem_, node);
+	this_cpu_write(_numa_mem_, node);
 }
 #endif
...