openeuler / Kernel
Commit dd521d1e
Authored Nov 05, 2014 by Michael Ellerman
Merge branch 'topic/get-cpu-var' into next
Parents: 8418804e 69111bac
Showing 32 changed files with 108 additions and 103 deletions (+108, -103)
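The hunks below apply one mechanical conversion across arch/powerpc: the address-based per-cpu accessors (__get_cpu_var(), get_cpu_var(), __raw_get_cpu_var()) are replaced by the this_cpu_* operations. As a reading aid, a minimal sketch of the recurring idioms follows; demo_count and demo_stat_var are hypothetical per-cpu variables for illustration, not names from this patch.

	#include <linux/percpu.h>

	struct demo_stat {
		unsigned long irqs;
	};

	static DEFINE_PER_CPU(unsigned int, demo_count);
	static DEFINE_PER_CPU(struct demo_stat, demo_stat_var);

	static void demo(void)
	{
		/* old: struct demo_stat *s = &__get_cpu_var(demo_stat_var); */
		struct demo_stat *s = this_cpu_ptr(&demo_stat_var);
		unsigned int x;

		/* old: __get_cpu_var(demo_count)++; */
		__this_cpu_inc(demo_count);

		/* old: x = __get_cpu_var(demo_count); */
		x = __this_cpu_read(demo_count);

		/* old: __get_cpu_var(demo_count) = 0; */
		__this_cpu_write(demo_count, 0);

		s->irqs += x;
	}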
arch/powerpc/include/asm/hardirq.h                  +6   -1
arch/powerpc/include/asm/tlbflush.h                 +2   -2
arch/powerpc/include/asm/xics.h                     +4   -4
arch/powerpc/kernel/dbell.c                         +1   -1
arch/powerpc/kernel/hw_breakpoint.c                 +3   -3
arch/powerpc/kernel/iommu.c                         +1   -1
arch/powerpc/kernel/irq.c                           +2   -2
arch/powerpc/kernel/kgdb.c                          +1   -1
arch/powerpc/kernel/kprobes.c                       +3   -3
arch/powerpc/kernel/mce.c                           +12  -12
arch/powerpc/kernel/process.c                       +5   -5
arch/powerpc/kernel/smp.c                           +3   -3
arch/powerpc/kernel/sysfs.c                         +2   -2
arch/powerpc/kernel/time.c                          +11  -11
arch/powerpc/kernel/traps.c                         +4   -4
arch/powerpc/kvm/e500.c                             +7   -7
arch/powerpc/kvm/e500mc.c                           +2   -2
arch/powerpc/mm/hash_native_64.c                    +1   -1
arch/powerpc/mm/hash_utils_64.c                     +1   -1
arch/powerpc/mm/hugetlbpage-book3e.c                +3   -3
arch/powerpc/mm/hugetlbpage.c                       +1   -1
arch/powerpc/perf/core-book3s.c                     +11  -11
arch/powerpc/perf/core-fsl-emb.c                    +3   -3
arch/powerpc/platforms/cell/interrupt.c             +3   -3
arch/powerpc/platforms/powernv/opal-tracepoints.c   +2   -2
arch/powerpc/platforms/ps3/interrupt.c              +1   -1
arch/powerpc/platforms/pseries/dtl.c                +1   -1
arch/powerpc/platforms/pseries/hvCall_inst.c        +2   -2
arch/powerpc/platforms/pseries/iommu.c              +4   -4
arch/powerpc/platforms/pseries/lpar.c               +3   -3
arch/powerpc/platforms/pseries/ras.c                +2   -2
arch/powerpc/sysdev/xics/xics-common.c              +1   -1
arch/powerpc/include/asm/hardirq.h
@@ -21,7 +21,12 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 
 #define __ARCH_IRQ_STAT
 
-#define local_softirq_pending()	__get_cpu_var(irq_stat).__softirq_pending
+#define local_softirq_pending()	__this_cpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x) __this_cpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x) __this_cpu_or(irq_stat.__softirq_pending, (x))
 
 static inline void ack_bad_irq(unsigned int irq)
 {
arch/powerpc/include/asm/tlbflush.h
@@ -107,14 +107,14 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 
 	batch->active = 1;
 }
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 
 	if (batch->index)
 		__flush_tlb_pending(batch);
arch/powerpc/include/asm/xics.h
@@ -98,7 +98,7 @@ DECLARE_PER_CPU(struct xics_cppr, xics_cppr);
 
 static inline void xics_push_cppr(unsigned int vec)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
 		return;
@@ -111,7 +111,7 @@ static inline void xics_push_cppr(unsigned int vec)
 
 static inline unsigned char xics_pop_cppr(void)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	if (WARN_ON(os_cppr->index < 1))
 		return LOWEST_PRIORITY;
@@ -121,7 +121,7 @@ static inline unsigned char xics_pop_cppr(void)
 
 static inline void xics_set_base_cppr(unsigned char cppr)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	/* we only really want to set the priority when there's
 	 * just one cppr value on the stack
@@ -133,7 +133,7 @@ static inline void xics_set_base_cppr(unsigned char cppr)
 
 static inline unsigned char xics_cppr_top(void)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	return os_cppr->stack[os_cppr->index];
 }
arch/powerpc/kernel/dbell.c
@@ -41,7 +41,7 @@ void doorbell_exception(struct pt_regs *regs)
 
 	may_hard_irq_enable();
 
-	__get_cpu_var(irq_stat).doorbell_irqs++;
+	__this_cpu_inc(irq_stat.doorbell_irqs);
 
 	smp_ipi_demux();
arch/powerpc/kernel/hw_breakpoint.c
@@ -63,7 +63,7 @@ int hw_breakpoint_slots(int type)
 int arch_install_hw_breakpoint(struct perf_event *bp)
 {
 	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-	struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
 
 	*slot = bp;
@@ -88,7 +88,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
  */
 void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 {
-	struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+	struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
 
 	if (*slot != bp) {
 		WARN_ONCE(1, "Can't find the breakpoint");
@@ -226,7 +226,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
 	 */
 	rcu_read_lock();
 
-	bp = __get_cpu_var(bp_per_reg);
+	bp = __this_cpu_read(bp_per_reg);
 	if (!bp)
 		goto out;
 	info = counter_arch_bp(bp);
arch/powerpc/kernel/iommu.c
@@ -208,7 +208,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
 	 * We don't need to disable preemption here because any CPU can
 	 * safely use any IOMMU pool.
 	 */
-	pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);
+	pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
 
 	if (largealloc)
 		pool = &(tbl->large_pool);
arch/powerpc/kernel/irq.c
@@ -114,7 +114,7 @@ static inline notrace void set_soft_enabled(unsigned long enable)
 static inline notrace int decrementer_check_overflow(void)
 {
 	u64 now = get_tb_or_rtc();
-	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 
 	return now >= *next_tb;
 }
@@ -499,7 +499,7 @@ void __do_irq(struct pt_regs *regs)
 
 	/* And finally process it */
 	if (unlikely(irq == NO_IRQ))
-		__get_cpu_var(irq_stat).spurious_irqs++;
+		__this_cpu_inc(irq_stat.spurious_irqs);
 	else
 		generic_handle_irq(irq);
arch/powerpc/kernel/kgdb.c
@@ -155,7 +155,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
 {
 	struct thread_info *thread_info, *exception_thread_info;
 	struct thread_info *backup_current_thread_info =
-		&__get_cpu_var(kgdb_thread_info);
+		this_cpu_ptr(&kgdb_thread_info);
 
 	if (user_mode(regs))
 		return 0;
arch/powerpc/kernel/kprobes.c
@@ -119,7 +119,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
 }
@@ -127,7 +127,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 	kcb->kprobe_saved_msr = regs->msr;
 }
@@ -192,7 +192,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 				ret = 1;
 				goto no_kprobe;
 			}
-			p = __get_cpu_var(current_kprobe);
+			p = __this_cpu_read(current_kprobe);
 			if (p->break_handler && p->break_handler(p, regs)) {
 				goto ss_probe;
 			}
arch/powerpc/kernel/mce.c
@@ -73,8 +73,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
 		uint64_t nip, uint64_t addr)
 {
 	uint64_t srr1;
-	int index = __get_cpu_var(mce_nest_count)++;
-	struct machine_check_event *mce = &__get_cpu_var(mce_event[index]);
+	int index = __this_cpu_inc_return(mce_nest_count);
+	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
 
 	/*
 	 * Return if we don't have enough space to log mce event.
@@ -143,7 +143,7 @@
  */
 int get_mce_event(struct machine_check_event *mce, bool release)
 {
-	int index = __get_cpu_var(mce_nest_count) - 1;
+	int index = __this_cpu_read(mce_nest_count) - 1;
 	struct machine_check_event *mc_evt;
 	int ret = 0;
@@ -153,7 +153,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
 	/* Check if we have MCE info to process. */
 	if (index < MAX_MC_EVT) {
-		mc_evt = &__get_cpu_var(mce_event[index]);
+		mc_evt = this_cpu_ptr(&mce_event[index]);
 		/* Copy the event structure and release the original */
 		if (mce)
 			*mce = *mc_evt;
@@ -163,7 +163,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
 	}
 	/* Decrement the count to free the slot. */
 	if (release)
-		__get_cpu_var(mce_nest_count)--;
+		__this_cpu_dec(mce_nest_count);
 
 	return ret;
 }
@@ -184,13 +184,13 @@ void machine_check_queue_event(void)
 	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
 		return;
 
-	index = __get_cpu_var(mce_queue_count)++;
+	index = __this_cpu_inc_return(mce_queue_count);
 	/* If queue is full, just return for now. */
 	if (index >= MAX_MC_EVT) {
-		__get_cpu_var(mce_queue_count)--;
+		__this_cpu_dec(mce_queue_count);
 		return;
 	}
-	__get_cpu_var(mce_event_queue[index]) = evt;
+	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));
 
 	/* Queue irq work to process this event later. */
 	irq_work_queue(&mce_event_process_work);
@@ -208,11 +208,11 @@ static void machine_check_process_queued_event(struct irq_work *work)
 	 * For now just print it to console.
 	 * TODO: log this error event to FSP or nvram.
 	 */
-	while (__get_cpu_var(mce_queue_count) > 0) {
-		index = __get_cpu_var(mce_queue_count) - 1;
+	while (__this_cpu_read(mce_queue_count) > 0) {
+		index = __this_cpu_read(mce_queue_count) - 1;
 		machine_check_print_event_info(
-				&__get_cpu_var(mce_event_queue[index]));
-		__get_cpu_var(mce_queue_count)--;
+				this_cpu_ptr(&mce_event_queue[index]));
+		__this_cpu_dec(mce_queue_count);
 	}
 }
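A note on the counter conversions in mce.c above, worth keeping in mind while reading the hunks: an expression like __get_cpu_var(x)++ evaluates to the value before the increment, while __this_cpu_inc_return() returns the value after it, so the two index computations differ by one. A minimal sketch with a hypothetical per-cpu counter:

	static DEFINE_PER_CPU(int, nest_demo);

	static int demo_push(void)
	{
		/* old: int index = __get_cpu_var(nest_demo)++;  (yields the old value) */
		int index = __this_cpu_inc_return(nest_demo);	/* yields the new value */

		return index;
	}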
arch/powerpc/kernel/process.c
@@ -499,7 +499,7 @@ static inline int set_dawr(struct arch_hw_breakpoint *brk)
 
 void __set_breakpoint(struct arch_hw_breakpoint *brk)
 {
-	__get_cpu_var(current_brk) = *brk;
+	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
 
 	if (cpu_has_feature(CPU_FTR_DAWR))
 		set_dawr(brk);
@@ -842,7 +842,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
  * schedule DABR
  */
 #ifndef CONFIG_HAVE_HW_BREAKPOINT
-	if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
+	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
 		__set_breakpoint(&new->thread.hw_brk);
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
 #endif
@@ -856,7 +856,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	 * Collect processor utilization data per process
 	 */
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
+		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
 		long unsigned start_tb, current_tb;
 		start_tb = old_thread->start_tb;
 		cu->current_tb = current_tb = mfspr(SPRN_PURR);
@@ -866,7 +866,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_PPC64 */
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	batch = &__get_cpu_var(ppc64_tlb_batch);
+	batch = this_cpu_ptr(&ppc64_tlb_batch);
 	if (batch->active) {
 		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
 		if (batch->index)
@@ -889,7 +889,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #ifdef CONFIG_PPC_BOOK3S_64
 	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
 		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
-		batch = &__get_cpu_var(ppc64_tlb_batch);
+		batch = this_cpu_ptr(&ppc64_tlb_batch);
 		batch->active = 1;
 	}
 #endif /* CONFIG_PPC_BOOK3S_64 */
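The __set_breakpoint() hunk above also shows how per-cpu structure assignment is converted: the this_cpu_* operations cover scalar reads, writes, and read-modify-writes, so a whole-struct store becomes an explicit memcpy() through this_cpu_ptr(). A minimal sketch with hypothetical types, not from the patch:

	struct brk_demo {
		unsigned long address;
		int type;
	};

	static DEFINE_PER_CPU(struct brk_demo, cur_brk_demo);

	static void demo_set_brk(struct brk_demo *brk)
	{
		/* old: __get_cpu_var(cur_brk_demo) = *brk; */
		memcpy(this_cpu_ptr(&cur_brk_demo), brk, sizeof(*brk));
	}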
arch/powerpc/kernel/smp.c
@@ -243,7 +243,7 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
 
 irqreturn_t smp_ipi_demux(void)
 {
-	struct cpu_messages *info = &__get_cpu_var(ipi_message);
+	struct cpu_messages *info = this_cpu_ptr(&ipi_message);
 	unsigned int all;
 
 	mb();	/* order any irq clear */
@@ -442,9 +442,9 @@ void generic_mach_cpu_die(void)
 	idle_task_exit();
 	cpu = smp_processor_id();
 	printk(KERN_DEBUG "CPU%d offline\n", cpu);
-	__get_cpu_var(cpu_state) = CPU_DEAD;
+	__this_cpu_write(cpu_state, CPU_DEAD);
 	smp_wmb();
-	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
+	while (__this_cpu_read(cpu_state) != CPU_UP_PREPARE)
 		cpu_relax();
 }
arch/powerpc/kernel/sysfs.c
@@ -394,10 +394,10 @@ void ppc_enable_pmcs(void)
 	ppc_set_pmu_inuse(1);
 
 	/* Only need to enable them once */
-	if (__get_cpu_var(pmcs_enabled))
+	if (__this_cpu_read(pmcs_enabled))
 		return;
 
-	__get_cpu_var(pmcs_enabled) = 1;
+	__this_cpu_write(pmcs_enabled, 1);
 
 	if (ppc_md.enable_pmcs)
 		ppc_md.enable_pmcs();
arch/powerpc/kernel/time.c
@@ -458,9 +458,9 @@ static inline void clear_irq_work_pending(void)
 
 DEFINE_PER_CPU(u8, irq_work_pending);
 
-#define set_irq_work_pending_flag()	__get_cpu_var(irq_work_pending) = 1
-#define test_irq_work_pending()		__get_cpu_var(irq_work_pending)
-#define clear_irq_work_pending()	__get_cpu_var(irq_work_pending) = 0
+#define set_irq_work_pending_flag()	__this_cpu_write(irq_work_pending, 1)
+#define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
+#define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)
 
 #endif /* 32 vs 64 bit */
@@ -482,8 +482,8 @@ void arch_irq_work_raise(void)
 static void __timer_interrupt(void)
 {
 	struct pt_regs *regs = get_irq_regs();
-	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
-	struct clock_event_device *evt = &__get_cpu_var(decrementers);
+	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
+	struct clock_event_device *evt = this_cpu_ptr(&decrementers);
 	u64 now;
 
 	trace_timer_interrupt_entry(regs);
@@ -498,7 +498,7 @@ static void __timer_interrupt(void)
 		*next_tb = ~(u64)0;
 		if (evt->event_handler)
 			evt->event_handler(evt);
-		__get_cpu_var(irq_stat).timer_irqs_event++;
+		__this_cpu_inc(irq_stat.timer_irqs_event);
 	} else {
 		now = *next_tb - now;
 		if (now <= DECREMENTER_MAX)
@@ -506,13 +506,13 @@ static void __timer_interrupt(void)
 		/* We may have raced with new irq work */
 		if (test_irq_work_pending())
 			set_dec(1);
-		__get_cpu_var(irq_stat).timer_irqs_others++;
+		__this_cpu_inc(irq_stat.timer_irqs_others);
 	}
 
 #ifdef CONFIG_PPC64
 	/* collect purr register values often, for accurate calculations */
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
+		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
 		cu->current_tb = mfspr(SPRN_PURR);
 	}
 #endif
@@ -527,7 +527,7 @@ static void __timer_interrupt(void)
 void timer_interrupt(struct pt_regs * regs)
 {
 	struct pt_regs *old_regs;
-	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 
 	/* Ensure a positive value is written to the decrementer, or else
 	 * some CPUs will continue to take decrementer exceptions.
@@ -813,7 +813,7 @@ static void __init clocksource_init(void)
 static int decrementer_set_next_event(unsigned long evt,
 				      struct clock_event_device *dev)
 {
-	__get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
+	__this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
 	set_dec(evt);
 
 	/* We may have raced with new irq work */
@@ -833,7 +833,7 @@ static void decrementer_set_mode(enum clock_event_mode mode,
 /* Interrupt handler for the timer broadcast IPI */
 void tick_broadcast_ipi_handler(void)
 {
-	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
 
 	*next_tb = get_tb_or_rtc();
 	__timer_interrupt();
arch/powerpc/kernel/traps.c
@@ -295,7 +295,7 @@ long machine_check_early(struct pt_regs *regs)
 {
 	long handled = 0;
 
-	__get_cpu_var(irq_stat).mce_exceptions++;
+	__this_cpu_inc(irq_stat.mce_exceptions);
 
 	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
 		handled = cur_cpu_spec->machine_check_early(regs);
@@ -304,7 +304,7 @@ long machine_check_early(struct pt_regs *regs)
 
 long hmi_exception_realmode(struct pt_regs *regs)
 {
-	__get_cpu_var(irq_stat).hmi_exceptions++;
+	__this_cpu_inc(irq_stat.hmi_exceptions);
 
 	if (ppc_md.hmi_exception_early)
 		ppc_md.hmi_exception_early(regs);
@@ -700,7 +700,7 @@ void machine_check_exception(struct pt_regs *regs)
 	enum ctx_state prev_state = exception_enter();
 	int recover = 0;
 
-	__get_cpu_var(irq_stat).mce_exceptions++;
+	__this_cpu_inc(irq_stat.mce_exceptions);
 
 	/* See if any machine dependent calls. In theory, we would want
 	 * to call the CPU first, and call the ppc_md. one if the CPU
@@ -1519,7 +1519,7 @@ void vsx_unavailable_tm(struct pt_regs *regs)
 
 void performance_monitor_exception(struct pt_regs *regs)
 {
-	__get_cpu_var(irq_stat).pmu_irqs++;
+	__this_cpu_inc(irq_stat.pmu_irqs);
 
 	perf_irq(regs);
 }
arch/powerpc/kvm/e500.c
@@ -76,11 +76,11 @@ static inline int local_sid_setup_one(struct id *entry)
 	unsigned long sid;
 	int ret = -1;
 
-	sid = ++(__get_cpu_var(pcpu_last_used_sid));
+	sid = __this_cpu_inc_return(pcpu_last_used_sid);
 	if (sid < NUM_TIDS) {
-		__get_cpu_var(pcpu_sids).entry[sid] = entry;
+		__this_cpu_write(pcpu_sids.entry[sid], entry);
 		entry->val = sid;
-		entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
+		entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
 		ret = sid;
 	}
@@ -108,8 +108,8 @@ static inline int local_sid_setup_one(struct id *entry)
 static inline int local_sid_lookup(struct id *entry)
 {
 	if (entry && entry->val != 0 &&
-	    __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
-	    entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
+	    __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
+	    entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
 		return entry->val;
 	return -1;
 }
@@ -117,8 +117,8 @@ static inline int local_sid_lookup(struct id *entry)
 /* Invalidate all id mappings on local core -- call with preempt disabled */
 static inline void local_sid_destroy_all(void)
 {
-	__get_cpu_var(pcpu_last_used_sid) = 0;
-	memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
+	__this_cpu_write(pcpu_last_used_sid, 0);
+	memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
 }
 
 static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
arch/powerpc/kvm/e500mc.c
@@ -144,9 +144,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
 	mtspr(SPRN_GESR, vcpu->arch.shared->esr);
 
 	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
-	    __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] != vcpu) {
+	    __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) {
 		kvmppc_e500_tlbil_all(vcpu_e500);
-		__get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] = vcpu;
+		__this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu);
 	}
 }
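The e500 and e500mc hunks above illustrate that the this_cpu_* operations take the per-cpu lvalue itself, so array elements can be read and written directly (e.g. __this_cpu_read(pcpu_sids.entry[i])) instead of indexing the result of __get_cpu_var(). A minimal sketch with a hypothetical per-cpu array, not from the patch:

	#define NR_DEMO_SLOTS	4

	static DEFINE_PER_CPU(unsigned long[NR_DEMO_SLOTS], demo_slot);

	static void demo_track(unsigned long val, int slot)
	{
		/* old: if (__get_cpu_var(demo_slot)[slot] != val)
		 *	     __get_cpu_var(demo_slot)[slot] = val;   */
		if (__this_cpu_read(demo_slot[slot]) != val)
			__this_cpu_write(demo_slot[slot], val);
	}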
arch/powerpc/mm/hash_native_64.c
@@ -629,7 +629,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 	unsigned long want_v;
 	unsigned long flags;
 	real_pte_t pte;
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 	unsigned long psize = batch->psize;
 	int ssize = batch->ssize;
 	int i;
arch/powerpc/mm/hash_utils_64.c
@@ -1322,7 +1322,7 @@ void flush_hash_range(unsigned long number, int local)
 	else {
 		int i;
 		struct ppc64_tlb_batch *batch =
-			&__get_cpu_var(ppc64_tlb_batch);
+			this_cpu_ptr(&ppc64_tlb_batch);
 
 		for (i = 0; i < number; i++)
 			flush_hash_page(batch->vpn[i], batch->pte[i],
arch/powerpc/mm/hugetlbpage-book3e.c
@@ -33,13 +33,13 @@ static inline int tlb1_next(void)
 
 	ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
 
-	index = __get_cpu_var(next_tlbcam_idx);
+	index = this_cpu_read(next_tlbcam_idx);
 
 	/* Just round-robin the entries and wrap when we hit the end */
 	if (unlikely(index == ncams - 1))
-		__get_cpu_var(next_tlbcam_idx) = tlbcam_index;
+		__this_cpu_write(next_tlbcam_idx, tlbcam_index);
 	else
-		__get_cpu_var(next_tlbcam_idx)++;
+		__this_cpu_inc(next_tlbcam_idx);
 
 	return index;
 }
arch/powerpc/mm/hugetlbpage.c
@@ -462,7 +462,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
 {
 	struct hugepd_freelist **batchp;
 
-	batchp = &get_cpu_var(hugepd_freelist_cur);
+	batchp = this_cpu_ptr(&hugepd_freelist_cur);
 
 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
 	    cpumask_equal(mm_cpumask(tlb->mm),
arch/powerpc/perf/core-book3s.c
@@ -339,7 +339,7 @@ static void power_pmu_bhrb_reset(void)
 
 static void power_pmu_bhrb_enable(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	if (!ppmu->bhrb_nr)
 		return;
@@ -354,7 +354,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event)
 
 static void power_pmu_bhrb_disable(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	if (!ppmu->bhrb_nr)
 		return;
@@ -1144,7 +1144,7 @@ static void power_pmu_disable(struct pmu *pmu)
 	if (!ppmu)
 		return;
 	local_irq_save(flags);
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	if (!cpuhw->disabled) {
 		/*
@@ -1211,7 +1211,7 @@ static void power_pmu_enable(struct pmu *pmu)
 		return;
 	local_irq_save(flags);
 
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 	if (!cpuhw->disabled)
 		goto out;
@@ -1403,7 +1403,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
 	 * Add the event to the list (if there is room)
 	 * and check whether the total set is still feasible.
 	 */
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 	n0 = cpuhw->n_events;
 	if (n0 >= ppmu->n_counter)
 		goto out;
@@ -1469,7 +1469,7 @@ static void power_pmu_del(struct perf_event *event, int ef_flags)
 	power_pmu_read(event);
 
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 	for (i = 0; i < cpuhw->n_events; ++i) {
 		if (event == cpuhw->event[i]) {
 			while (++i < cpuhw->n_events) {
@@ -1575,7 +1575,7 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags)
  */
 static void power_pmu_start_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	perf_pmu_disable(pmu);
 	cpuhw->group_flag |= PERF_EVENT_TXN;
@@ -1589,7 +1589,7 @@ static void power_pmu_start_txn(struct pmu *pmu)
  */
 static void power_pmu_cancel_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	cpuhw->group_flag &= ~PERF_EVENT_TXN;
 	perf_pmu_enable(pmu);
@@ -1607,7 +1607,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
 	if (!ppmu)
 		return -EAGAIN;
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 	n = cpuhw->n_events;
 	if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
 		return -EAGAIN;
@@ -1964,7 +1964,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
 		struct cpu_hw_events *cpuhw;
-		cpuhw = &__get_cpu_var(cpu_hw_events);
+		cpuhw = this_cpu_ptr(&cpu_hw_events);
 		power_pmu_bhrb_read(cpuhw);
 		data.br_stack = &cpuhw->bhrb_stack;
 	}
@@ -2037,7 +2037,7 @@ static bool pmc_overflow(unsigned long val)
 static void perf_event_interrupt(struct pt_regs *regs)
 {
 	int i, j;
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 	struct perf_event *event;
 	unsigned long val[8];
 	int found, active;
arch/powerpc/perf/core-fsl-emb.c
@@ -210,7 +210,7 @@ static void fsl_emb_pmu_disable(struct pmu *pmu)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 
 	if (!cpuhw->disabled) {
 		cpuhw->disabled = 1;
@@ -249,7 +249,7 @@ static void fsl_emb_pmu_enable(struct pmu *pmu)
 	unsigned long flags;
 
 	local_irq_save(flags);
-	cpuhw = &__get_cpu_var(cpu_hw_events);
+	cpuhw = this_cpu_ptr(&cpu_hw_events);
 	if (!cpuhw->disabled)
 		goto out;
@@ -653,7 +653,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 static void perf_event_interrupt(struct pt_regs *regs)
 {
 	int i;
-	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
 	struct perf_event *event;
 	unsigned long val;
 	int found = 0;
arch/powerpc/platforms/cell/interrupt.c
@@ -82,7 +82,7 @@ static void iic_unmask(struct irq_data *d)
 
 static void iic_eoi(struct irq_data *d)
 {
-	struct iic *iic = &__get_cpu_var(cpu_iic);
+	struct iic *iic = this_cpu_ptr(&cpu_iic);
 	out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
 	BUG_ON(iic->eoi_ptr < 0);
 }
@@ -148,7 +148,7 @@ static unsigned int iic_get_irq(void)
 	struct iic *iic;
 	unsigned int virq;
 
-	iic = &__get_cpu_var(cpu_iic);
+	iic = this_cpu_ptr(&cpu_iic);
 	*(unsigned long *) &pending =
 		in_be64((u64 __iomem *) &iic->regs->pending_destr);
 	if (!(pending.flags & CBE_IIC_IRQ_VALID))
@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
 
 void iic_setup_cpu(void)
 {
-	out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
+	out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
 }
 
 u8 iic_get_target_id(int cpu)
arch/powerpc/platforms/powernv/opal-tracepoints.c
@@ -48,7 +48,7 @@ void __trace_opal_entry(unsigned long opcode, unsigned long *args)
 
 	local_irq_save(flags);
 
-	depth = &__get_cpu_var(opal_trace_depth);
+	depth = this_cpu_ptr(&opal_trace_depth);
 
 	if (*depth)
 		goto out;
@@ -69,7 +69,7 @@ void __trace_opal_exit(long opcode, unsigned long retval)
 
 	local_irq_save(flags);
 
-	depth = &__get_cpu_var(opal_trace_depth);
+	depth = this_cpu_ptr(&opal_trace_depth);
 
 	if (*depth)
 		goto out;
arch/powerpc/platforms/ps3/interrupt.c
@@ -711,7 +711,7 @@ void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
 static unsigned int ps3_get_irq(void)
 {
-	struct ps3_private *pd = &__get_cpu_var(ps3_private);
+	struct ps3_private *pd = this_cpu_ptr(&ps3_private);
 	u64 x = (pd->bmp.status & pd->bmp.mask);
 	unsigned int plug;
arch/powerpc/platforms/pseries/dtl.c
@@ -75,7 +75,7 @@ static atomic_t dtl_count;
  */
 static void consume_dtle(struct dtl_entry *dtle, u64 index)
 {
-	struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings);
+	struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
 	struct dtl_entry *wp = dtlr->write_ptr;
 	struct lppaca *vpa = local_paca->lppaca_ptr;
arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -110,7 +110,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long
 	if (opcode > MAX_HCALL_OPCODE)
 		return;
 
-	h = &__get_cpu_var(hcall_stats)[opcode / 4];
+	h = this_cpu_ptr(&hcall_stats[opcode / 4]);
 	h->tb_start = mftb();
 	h->purr_start = mfspr(SPRN_PURR);
 }
@@ -123,7 +123,7 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long
 	if (opcode > MAX_HCALL_OPCODE)
 		return;
 
-	h = &__get_cpu_var(hcall_stats)[opcode / 4];
+	h = this_cpu_ptr(&hcall_stats[opcode / 4]);
 	h->num_calls++;
 	h->tb_total += mftb() - h->tb_start;
 	h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
arch/powerpc/platforms/pseries/iommu.c
@@ -199,7 +199,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 
 	local_irq_save(flags);	/* to protect tcep and the page behind it */
 
-	tcep = __get_cpu_var(tce_page);
+	tcep = __this_cpu_read(tce_page);
 
 	/* This is safe to do since interrupts are off when we're called
 	 * from iommu_alloc{,_sg}()
@@ -212,7 +212,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
 					           direction, attrs);
 		}
-		__get_cpu_var(tce_page) = tcep;
+		__this_cpu_write(tce_page, tcep);
 	}
 
 	rpn = __pa(uaddr) >> TCE_SHIFT;
@@ -398,7 +398,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
 	long l, limit;
 
 	local_irq_disable();	/* to protect tcep and the page behind it */
-	tcep = __get_cpu_var(tce_page);
+	tcep = __this_cpu_read(tce_page);
 
 	if (!tcep) {
 		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
@@ -406,7 +406,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
 			local_irq_enable();
 			return -ENOMEM;
 		}
-		__get_cpu_var(tce_page) = tcep;
+		__this_cpu_write(tce_page, tcep);
 	}
 
 	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
arch/powerpc/platforms/pseries/lpar.c
@@ -515,7 +515,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
 	unsigned long vpn;
 	unsigned long i, pix, rc;
 	unsigned long flags = 0;
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 	unsigned long param[9];
 	unsigned long hash, index, shift, hidx, slot;
@@ -705,7 +705,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
 
 	local_irq_save(flags);
 
-	depth = &__get_cpu_var(hcall_trace_depth);
+	depth = this_cpu_ptr(&hcall_trace_depth);
 
 	if (*depth)
 		goto out;
@@ -730,7 +730,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval,
 
 	local_irq_save(flags);
 
-	depth = &__get_cpu_var(hcall_trace_depth);
+	depth = this_cpu_ptr(&hcall_trace_depth);
 
 	if (*depth)
 		goto out;
arch/powerpc/platforms/pseries/ras.c
@@ -302,8 +302,8 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
 	/* If it isn't an extended log we can use the per cpu 64bit buffer */
 	h = (struct rtas_error_log *)&savep[1];
 	if (!rtas_error_extended(h)) {
-		memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64));
-		errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf);
+		memcpy(this_cpu_ptr(&mce_data_buf), h, sizeof(__u64));
+		errhdr = (struct rtas_error_log *)this_cpu_ptr(&mce_data_buf);
 	} else {
 		int len, error_log_length;
arch/powerpc/sysdev/xics/xics-common.c
@@ -155,7 +155,7 @@ int __init xics_smp_probe(void)
 void xics_teardown_cpu(void)
 {
-	struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
+	struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
 
 	/*
 	 * we have to reset the cppr index to 0 because we're