openeuler / Kernel
Commit b4c286e6
Authored June 18, 2008 by Ingo Molnar
SGI UV: clean up arch/x86/kernel/tlb_uv.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent: dc163a41
Showing 1 changed file with 62 additions and 45 deletions
arch/x86/kernel/tlb_uv.c (+62, -45)
@@ -11,19 +11,22 @@
 #include <linux/kernel.h>

 #include <asm/mmu_context.h>
-#include <asm/idle.h>
-#include <asm/genapic.h>
-#include <asm/uv/uv_hub.h>
 #include <asm/uv/uv_mmrs.h>
+#include <asm/uv/uv_hub.h>
 #include <asm/uv/uv_bau.h>
+#include <asm/genapic.h>
+#include <asm/idle.h>
 #include <asm/tsc.h>

 #include <mach_apic.h>

 static struct bau_control **uv_bau_table_bases __read_mostly;
 static int uv_bau_retry_limit __read_mostly;
-static int uv_nshift __read_mostly; /* position of pnode (which is nasid>>1) */
-static unsigned long uv_mmask __read_mostly;
+
+/* position of pnode (which is nasid>>1): */
+static int uv_nshift __read_mostly;
+
+static unsigned long uv_mmask __read_mostly;

 static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
 static DEFINE_PER_CPU(struct bau_control, bau_control);
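A note on this hunk: besides reordering the includes and giving the uv_nshift comment its own line, it leaves the DEFINE_PER_CPU declarations untouched; those give every CPU a private copy of the statistics and control structures, so hot-path updates need no locking. Below is a minimal userspace sketch of that per-CPU pattern (the struct fields and names here are made up for illustration; the kernel's real mechanism uses dedicated per-CPU memory sections, not a plain array):

#include <stdio.h>

#define NR_CPUS_DEMO 4

/* hypothetical stand-in for struct ptc_stats */
struct ptc_stats_demo {
        unsigned long requestor;
};

/* stand-in for DEFINE_PER_CPU(struct ptc_stats, ptcstats) */
static struct ptc_stats_demo ptcstats_demo[NR_CPUS_DEMO];

int main(void)
{
        int cpu = 0;                        /* kernel: smp_processor_id() */

        ptcstats_demo[cpu].requestor++;     /* kernel: __get_cpu_var(ptcstats) */
        printf("cpu %d requests: %lu\n", cpu, ptcstats_demo[cpu].requestor);
        return 0;
}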
@@ -37,8 +40,8 @@ static DEFINE_PER_CPU(struct bau_control, bau_control);
  * be sent (the hardware will only do one reply per message).
  */
 static void uv_reply_to_message(int resource,
-               struct bau_payload_queue_entry *msg,
-               struct bau_msg_status *msp)
+                               struct bau_payload_queue_entry *msg,
+                               struct bau_msg_status *msp)
 {
        unsigned long dw;
@@ -55,11 +58,11 @@ static void uv_reply_to_message(int resource,
  * Other cpu's may come here at the same time for this message.
  */
 static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
-               int msg_slot, int sw_ack_slot)
+                                  int msg_slot, int sw_ack_slot)
 {
-       int cpu;
        unsigned long this_cpu_mask;
        struct bau_msg_status *msp;
+       int cpu;

        msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
        cpu = uv_blade_processor_id();
@@ -96,11 +99,11 @@ static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
  */
 static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
 {
-       int i;
-       int j;
-       int count = 0;
        struct bau_payload_queue_entry *msg;
        struct bau_msg_status *msp;
+       int count = 0;
+       int i;
+       int j;

        for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE;
             msg++, i++) {
@@ -111,7 +114,7 @@ static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
                               i, msg->address, msg->acknowledge_count,
                               msg->number_of_cpus);
                        for (j = 0; j < msg->number_of_cpus; j++) {
-                               if (!((long)1 << j & msp->seen_by.bits)) {
+                               if (!((1L << j) & msp->seen_by.bits)) {
                                        count++;
                                        printk("%d ", j);
                                }
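The change in this hunk is purely cosmetic: since << binds tighter than &, (long)1 << j & bits and (1L << j) & bits parse identically, so the parentheses and the 1L literal only make the intent explicit. The long-width constant matters because a plain int 1 << j is undefined once j reaches 32. A standalone check (not kernel code):

#include <assert.h>

int main(void)
{
        long bits = 1L << 40;               /* needs a long-width constant */

        assert((1L << 40) & bits);          /* the new, parenthesized form */
        assert((long)1 << 40 & bits);       /* the old form: same parse */
        assert(!((1L << 3) & bits));
        return 0;
}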
@@ -135,8 +138,7 @@ static int uv_examine_destinations(struct bau_target_nodemask *distribution)
        int count = 0;
        sender = smp_processor_id();
-       for (i = 0; i < (sizeof(struct bau_target_nodemask) *
-                        BITSPERBYTE);
-            i++) {
+       for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE;
+            i++) {
                if (!bau_node_isset(i, distribution))
                        continue;
                count += uv_examine_destination(uv_bau_table_bases[i], sender);
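The loop bound here is simply the number of bits in the nodemask: sizeof gives bytes, and BITSPERBYTE scales that to bits, one loop iteration per possible destination node. A tiny check, with a hypothetical mask layout (the real struct bau_target_nodemask is defined in uv_bau.h; this assumes an LP64 machine with 8-byte longs):

#include <assert.h>

#define BITSPERBYTE_DEMO 8

/* hypothetical stand-in for struct bau_target_nodemask */
struct bau_target_nodemask_demo {
        unsigned long bits[4];              /* 4 x 8 bytes = 256 bits on LP64 */
};

int main(void)
{
        assert(sizeof(struct bau_target_nodemask_demo) * BITSPERBYTE_DEMO == 256);
        return 0;
}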
@@ -217,11 +219,11 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
 {
        int completion_status = 0;
        int right_shift;
-       int bit;
-       int blade;
        int tries = 0;
-       unsigned long index;
+       int blade;
+       int bit;
        unsigned long mmr_offset;
+       unsigned long index;
        cycles_t time1;
        cycles_t time2;
@@ -294,7 +296,7 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
  * Returns 0 if some remote flushing remains to be done.
  */
 int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
-               unsigned long va)
+                       unsigned long va)
 {
        int i;
        int bit;
@@ -356,12 +358,12 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
  */
 void uv_bau_message_interrupt(struct pt_regs *regs)
 {
-       struct bau_payload_queue_entry *pqp;
-       struct bau_payload_queue_entry *msg;
        struct bau_payload_queue_entry *va_queue_first;
        struct bau_payload_queue_entry *va_queue_last;
+       struct bau_payload_queue_entry *msg;
        struct pt_regs *old_regs = set_irq_regs(regs);
-       cycles_t time1, time2;
+       cycles_t time1;
+       cycles_t time2;
        int msg_slot;
        int sw_ack_slot;
        int fw;
@@ -376,13 +378,14 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
        local_pnode = uv_blade_to_pnode(uv_numa_blade_id());

-       pqp = va_queue_first = __get_cpu_var(bau_control).va_queue_first;
+       va_queue_first = __get_cpu_var(bau_control).va_queue_first;
        va_queue_last = __get_cpu_var(bau_control).va_queue_last;
+
        msg = __get_cpu_var(bau_control).bau_msg_head;

        while (msg->sw_ack_vector) {
                count++;
                fw = msg->sw_ack_vector;
-               msg_slot = msg - pqp;
+               msg_slot = msg - va_queue_first;
                sw_ack_slot = ffs(fw) - 1;

                uv_bau_process_message(msg, msg_slot, sw_ack_slot);
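Two idioms in this hunk are worth unpacking. First, msg - va_queue_first is pointer subtraction, which yields the element index of msg within the payload queue, so dropping the redundant pqp alias changes nothing. Second, ffs() returns the 1-based position of the lowest set bit, hence the "- 1" to get a 0-based slot. A standalone illustration (the struct name is hypothetical; userspace ffs() comes from strings.h):

#include <assert.h>
#include <strings.h>                        /* ffs() */

/* hypothetical stand-in for struct bau_payload_queue_entry */
struct pq_entry_demo {
        unsigned long payload;
};

int main(void)
{
        struct pq_entry_demo queue[8];
        struct pq_entry_demo *msg = &queue[5];

        assert(msg - queue == 5);           /* element index, not byte offset */
        assert(ffs(0x28) - 1 == 3);         /* lowest set bit of 0b101000 */
        return 0;
}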
@@ -484,7 +487,7 @@ static int uv_ptc_seq_show(struct seq_file *file, void *data)
  * >0: retry limit
  */
 static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
-               size_t count, loff_t *data)
+                                size_t count, loff_t *data)
 {
        long newmode;
        char optstr[64];
@@ -587,42 +590,48 @@ static struct bau_control * __init uv_table_bases_init(int blade, int node)
        bau_tabp =
                kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
        BUG_ON(!bau_tabp);
+
        bau_tabp->msg_statuses =
                kmalloc_node(sizeof(struct bau_msg_status) *
                             DEST_Q_SIZE, GFP_KERNEL, node);
        BUG_ON(!bau_tabp->msg_statuses);
+
        for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
                bau_cpubits_clear(&msp->seen_by, (int)
                                  uv_blade_nr_possible_cpus(blade));
+
        bau_tabp->watching =
                kmalloc_node(sizeof(int) * DEST_NUM_RESOURCES, GFP_KERNEL, node);
        BUG_ON(!bau_tabp->watching);
-       for (i = 0, ip = bau_tabp->watching; i < DEST_Q_SIZE; i++, ip++) {
+
+       for (i = 0, ip = bau_tabp->watching; i < DEST_Q_SIZE; i++, ip++)
                *ip = 0;
-       }
+
        uv_bau_table_bases[blade] = bau_tabp;
+
        return bau_tabsp;
 }

 /*
  * finish the initialization of the per-blade control structures
  */
-static void __init uv_table_bases_finish(int blade, int node, int cur_cpu,
-               struct bau_control *bau_tablesp,
-               struct bau_desc *adp)
+static void __init
+uv_table_bases_finish(int blade, int node, int cur_cpu,
+                     struct bau_control *bau_tablesp,
+                     struct bau_desc *adp)
 {
-       int i;
        struct bau_control *bcp;
+       int i;

-       for (i = cur_cpu; i < (cur_cpu + uv_blade_nr_possible_cpus(blade));
+       for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade);
             i++) {
                bcp = (struct bau_control *)&per_cpu(bau_control, i);
                bcp->bau_msg_head = bau_tablesp->va_queue_first;
                bcp->va_queue_first = bau_tablesp->va_queue_first;
                bcp->va_queue_last = bau_tablesp->va_queue_last;
                bcp->watching = bau_tablesp->watching;
                bcp->msg_statuses = bau_tablesp->msg_statuses;
                bcp->descriptor_base = adp;
        }
 }
@@ -643,14 +652,18 @@ uv_activation_descriptor_init(int node, int pnode)
        adp = (struct bau_desc *)
                kmalloc_node(16384, GFP_KERNEL, node);
        BUG_ON(!adp);
+
        pa = __pa((unsigned long)adp);
        n = pa >> uv_nshift;
        m = pa & uv_mmask;
+
        mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
-       if (mmr_image)
+       if (mmr_image) {
                uv_write_global_mmr64(pnode, (unsigned long)
                                      UVH_LB_BAU_SB_DESCRIPTOR_BASE,
                                      (n << UV_DESC_BASE_PNODE_SHIFT | m));
+       }
+
        for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
                memset(ad2, 0, sizeof(struct bau_desc));
                ad2->header.sw_ack_flag = 1;
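For context on the n/m split above: the descriptor's physical address is divided into a node number (high bits, pa >> uv_nshift) and a node-local offset (low bits, pa & uv_mmask), and the two fields are then repacked into the MMR word. A sketch with made-up shift values (the real uv_nshift and UV_DESC_BASE_PNODE_SHIFT are hardware-defined on UV systems):

#include <assert.h>

#define DEMO_NSHIFT        36               /* hypothetical node shift */
#define DEMO_MMASK         ((1UL << DEMO_NSHIFT) - 1)
#define DEMO_PNODE_SHIFT   49               /* hypothetical MMR field shift */

int main(void)
{
        unsigned long pa = (3UL << DEMO_NSHIFT) | 0x1000;  /* node 3, offset 0x1000 */
        unsigned long n = pa >> DEMO_NSHIFT;
        unsigned long m = pa & DEMO_MMASK;
        unsigned long mmr = (n << DEMO_PNODE_SHIFT | m);   /* repacked field */

        assert(n == 3 && m == 0x1000);
        assert(mmr >> DEMO_PNODE_SHIFT == 3);
        return 0;
}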
@@ -669,16 +682,17 @@ uv_activation_descriptor_init(int node, int pnode)
 /*
  * initialize the destination side's receiving buffers
  */
-static struct bau_payload_queue_entry * __init uv_payload_queue_init(int node,
-               int pnode, struct bau_control *bau_tablesp)
+static struct bau_payload_queue_entry * __init
+uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
 {
-       char *cp;
        struct bau_payload_queue_entry *pqp;
+       char *cp;

        pqp = (struct bau_payload_queue_entry *) kmalloc_node(
                (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
                GFP_KERNEL, node);
        BUG_ON(!pqp);
+
        cp = (char *)pqp + 31;
        pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
        bau_tablesp->va_queue_first = pqp;
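The "+ 31" and shift pair above are a manual 32-byte alignment: the queue is over-allocated by one entry's worth of slack, and the pointer is rounded down to a multiple of 32 (>> 5 << 5 is equivalent to masking with ~31UL). A standalone version of the same trick, not kernel code:

#include <assert.h>
#include <stdlib.h>

int main(void)
{
        char *raw = malloc(256 + 31);       /* room for worst-case padding */
        char *cp;
        char *aligned;

        assert(raw != NULL);
        cp = raw + 31;
        aligned = (char *)(((unsigned long)cp >> 5) << 5);

        assert((unsigned long)aligned % 32 == 0);
        assert(aligned >= raw);             /* rounding down stays in-bounds */
        free(raw);
        return 0;
}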
@@ -694,6 +708,7 @@ static struct bau_payload_queue_entry * __init uv_payload_queue_init(int node,
                              (unsigned long)
                              uv_physnodeaddr(bau_tablesp->va_queue_last));
        memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
+
        return pqp;
 }
@@ -756,6 +771,7 @@ static int __init uv_bau_init(void)
        uv_bau_table_bases = (struct bau_control **)
                kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
        BUG_ON(!uv_bau_table_bases);
+
        last_blade = -1;
        for_each_online_node(node) {
                blade = uv_node_to_blade_id(node);
@@ -767,6 +783,7 @@ static int __init uv_bau_init(void)
        }
        set_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
        uv_enable_timeouts();
+
        return 0;
 }
 __initcall(uv_bau_init);
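Finally, __initcall(uv_bau_init) registers the function to run once during kernel boot, after core setup. A rough userspace analogue runs registered setup before main() via a GCC/Clang constructor attribute (this is only an analogy for illustration, not how the kernel actually sequences its initcall levels):

#include <stdio.h>

/* hypothetical stand-in for uv_bau_init() */
static int uv_bau_init_demo(void)
{
        printf("demo init ran\n");
        return 0;
}

/* stand-in for __initcall(): runs before main() */
__attribute__((constructor))
static void run_demo_initcall(void)
{
        uv_bau_init_demo();
}

int main(void)
{
        return 0;
}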