openeuler / raspberrypi-kernel

Commit cd5bc89d
Authored 16 years ago by David S. Miller
sparc64: Use cpumask_t pointers and for_each_cpu_mask_nr() in xcall_deliver.
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 622824db
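Context for the change: a cpumask_t is a fixed-size bitmap of NR_CPUS bits, so on kernels configured for many CPUs (NR_CPUS can be as large as 4096) passing one by value copies the entire bitmap onto the stack at every call. Taking a const cpumask_t * instead moves only a pointer. The following is a minimal userspace sketch of that calling-convention change, not kernel code; cpuset_t, deliver_byval, and deliver_byref are hypothetical names invented for illustration.

#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's cpumask_t: a fixed-size bitmap.
 * With NR_CPUS = 4096 this struct is 512 bytes on LP64. */
#define NR_CPUS 4096
#define BITS_PER_WORD (8 * sizeof(unsigned long))
typedef struct {
	unsigned long bits[NR_CPUS / BITS_PER_WORD];
} cpuset_t;

/* Old style: the entire 512-byte bitmap is copied for every call. */
static void deliver_byval(cpuset_t mask)
{
	(void)mask;	/* callee works on its own stack copy */
}

/* New style: only a pointer crosses the call boundary; the mask
 * itself is read in place and never copied. */
static void deliver_byref(const cpuset_t *mask)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
		unsigned long word = mask->bits[cpu / BITS_PER_WORD];
		if (word & (1UL << (cpu % BITS_PER_WORD)))
			printf("cpu %u set\n", cpu);
	}
}

int main(void)
{
	cpuset_t mask;

	memset(&mask, 0, sizeof(mask));
	mask.bits[0] = 0x5;		/* mark cpus 0 and 2 */

	deliver_byval(mask);		/* copies sizeof(cpuset_t) bytes */
	deliver_byref(&mask);		/* copies only a pointer */
	return 0;
}

The patch below applies exactly this transformation to the three xcall_deliver implementations and the xcall_deliver function pointer, switching the iteration macro to for_each_cpu_mask_nr() at the same time.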
Changes: 1 changed file with 21 additions and 18 deletions

arch/sparc64/kernel/smp.c  +21 −18
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -459,13 +459,13 @@ static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
 	}
 }
 
-static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
 {
 	u64 pstate;
 	int i;
 
 	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
-	for_each_cpu_mask(i, mask)
+	for_each_cpu_mask_nr(i, *mask)
 		spitfire_xcall_helper(data0, data1, data2, pstate, i);
 }
 
@@ -473,14 +473,17 @@ static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
  * packet, but we have no use for that.  However we do take advantage of
  * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
  */
-static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask_p)
 {
 	u64 pstate, ver, busy_mask;
 	int nack_busy_id, is_jbus, need_more;
+	cpumask_t mask;
 
-	if (cpus_empty(mask))
+	if (cpus_empty(*mask_p))
 		return;
 
+	mask = *mask_p;
+
 	/* Unfortunately, someone at Sun had the brilliant idea to make the
 	 * busy/nack fields hard-coded by ITID number for this Ultra-III
 	 * derivative processor.
@@ -511,7 +514,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 	{
 		int i;
 
-		for_each_cpu_mask(i, mask) {
+		for_each_cpu_mask_nr(i, mask) {
 			u64 target = (i << 14) | 0x70;
 
 			if (is_jbus) {
@@ -550,7 +553,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 			     : : "r" (pstate));
 			if (unlikely(need_more)) {
 				int i, cnt = 0;
-				for_each_cpu_mask(i, mask) {
+				for_each_cpu_mask_nr(i, mask) {
 					cpu_clear(i, mask);
 					cnt++;
 					if (cnt == 32)
@@ -584,7 +587,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 			/* Clear out the mask bits for cpus which did not
 			 * NACK us.
 			 */
-			for_each_cpu_mask(i, mask) {
+			for_each_cpu_mask_nr(i, mask) {
 				u64 check_mask;
 
 				if (is_jbus)
@@ -605,16 +608,16 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 }
 
 /* Multi-cpu list version.  */
-static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
 {
-	int cnt, retries, this_cpu, prev_sent, i;
-	unsigned long flags, status;
-	cpumask_t error_mask;
 	struct trap_per_cpu *tb;
 	u16 *cpu_list;
 	u64 *mondo;
+	cpumask_t error_mask;
+	unsigned long flags, status;
+	int cnt, retries, this_cpu, prev_sent, i;
 
-	if (cpus_empty(mask))
+	if (cpus_empty(*mask))
 		return;
 
 	/* We have to do this whole thing with interrupts fully disabled.
@@ -642,7 +645,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 
 	/* Setup the initial cpu list.  */
 	cnt = 0;
-	for_each_cpu_mask(i, mask)
+	for_each_cpu_mask_nr(i, *mask)
 		cpu_list[cnt++] = i;
 
 	cpus_clear(error_mask);
@@ -729,7 +732,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 		       "were in error state\n",
 		       this_cpu);
 		printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
-		for_each_cpu_mask(i, error_mask)
+		for_each_cpu_mask_nr(i, error_mask)
 			printk("%d ", i);
 		printk("]\n");
 		return;
@@ -756,7 +759,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 	printk("]\n");
 }
 
-static void (*xcall_deliver)(u64, u64, u64, cpumask_t);
+static void (*xcall_deliver)(u64, u64, u64, const cpumask_t *);
 
 /* Send cross call to all processors mentioned in MASK
  * except self.
@@ -769,7 +772,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
 	cpus_and(mask, mask, cpu_online_map);
 	cpu_clear(this_cpu, mask);
 
-	xcall_deliver(data0, data1, data2, mask);
+	xcall_deliver(data0, data1, data2, &mask);
 	/* NOTE: Caller runs local copy on master. */
 
 	put_cpu();
@@ -903,7 +906,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 	}
 	if (data0) {
 		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, mask);
+			      (u64) pg_addr, &mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
 		atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -945,7 +948,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	}
 	if (data0) {
 		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, mask);
+			      (u64) pg_addr, &mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
 		atomic_inc(&dcpage_flushes_xcall);
 #endif
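One design choice visible in the diff is worth noting: cheetah_xcall_deliver still keeps a local cpumask_t. It accepts const cpumask_t *mask_p but immediately copies it (mask = *mask_p) because its NACK retry loop clears bits out of the mask with cpu_clear(i, mask) as CPUs are handled, and mutating the caller's mask through a const pointer would be wrong. The read-only paths (spitfire_xcall_deliver and the hypervisor cpu-list setup) iterate *mask directly, and callers such as smp_cross_call_masked now just pass &mask, so the only remaining by-value copy is the one the cheetah path genuinely needs.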