openeuler / raspberrypi-kernel
Commit 5d0859ce
Authored Feb 26, 2009 by Ingo Molnar
Merge branch 'sched/clock' into tracing/ftrace
Conflicts:
        kernel/sched_clock.c

Parents: 14131f2f 83ce4009
Showing 3 changed files with 39 additions and 25 deletions (+39, -25):
arch/x86/kernel/cpu/intel.c     +7   -1
include/linux/sched.h           +10  -0
kernel/sched_clock.c            +22  -24
arch/x86/kernel/cpu/intel.c
@@ -4,6 +4,7 @@
 #include <linux/string.h>
 #include <linux/bitops.h>
 #include <linux/smp.h>
+#include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/module.h>

@@ -56,11 +57,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
         /*
          * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
-         * with P/T states and does not stop in deep C-states
+         * with P/T states and does not stop in deep C-states.
+         *
+         * It is also reliable across cores and sockets. (but not across
+         * cabinets - we turn it off in that case explicitly.)
          */
         if (c->x86_power & (1 << 8)) {
                 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+                set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
+                sched_clock_stable = 1;
         }
 }
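For reference, the bit tested in this hunk comes from CPUID leaf 0x80000007, the value the kernel caches in c->x86_power: EDX bit 8 advertises an invariant TSC that runs at a constant rate across P/T states and does not stop in deep C-states. Below is a minimal user-space sketch of the same check; it is not part of this commit and assumes an x86 build with GCC's or Clang's <cpuid.h>.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 0x80000007: Advanced Power Management information */
        if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) {
                puts("CPUID leaf 0x80000007 not available");
                return 1;
        }

        /* EDX bit 8: TSC runs at a constant rate and does not stop in deep C-states */
        printf("Invariant TSC: %s\n", (edx & (1u << 8)) ? "yes" : "no");
        return 0;
}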
include/linux/sched.h
@@ -1672,6 +1672,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
         return set_cpus_allowed_ptr(p, &new_mask);
 }

+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
 extern unsigned long long sched_clock(void);

 extern void sched_clock_init(void);
kernel/sched_clock.c
@@ -24,12 +24,12 @@
  * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
  * consistent between cpus (never more than 2 jiffies difference).
  */
-#include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <linux/ktime.h>
-#include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>

 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -44,6 +44,10 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 static __read_mostly int sched_clock_running;

 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+__read_mostly int sched_clock_stable;
+#else
+static const int sched_clock_stable = 1;
+#endif

 struct sched_clock_data {
         /*
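This hunk carries the core idea of the merge: sched_clock_stable is a real variable only under CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, and a const 1 otherwise, so callers can test it unconditionally and the compiler can drop the unstable slow path on architectures where it can never be taken. The standalone sketch below illustrates that flag pattern; the names HAVE_UNSTABLE_CLOCK, clock_stable, raw_clock and filtered_clock are invented for the example and are not kernel identifiers.

#include <stdio.h>

#ifdef HAVE_UNSTABLE_CLOCK              /* stand-in for CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
int clock_stable;                       /* boot code may set this to 1 if the clock proves reliable */
#else
static const int clock_stable = 1;      /* stable by definition; the slow path below folds away */
#endif

static unsigned long long raw_clock(void)
{
        return 123456789ULL;            /* stand-in for sched_clock() */
}

static unsigned long long filtered_clock(void)
{
        if (clock_stable)
                return raw_clock();     /* fast path: trust the raw clock directly */

        /* slow path: per-cpu clamping/filtering would live here */
        return raw_clock();
}

int main(void)
{
        printf("%llu\n", filtered_clock());
        return 0;
}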
@@ -88,7 +92,7 @@ void sched_clock_init(void)
 }

 /*
- * min,max except they take wrapping into account
+ * min, max except they take wrapping into account
  */
 static inline u64 wrap_min(u64 x, u64 y)
@@ -117,10 +121,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
         if (unlikely(delta < 0))
                 delta = 0;

+        if (unlikely(!sched_clock_running))
+                return 0ull;
+
         /*
          * scd->clock = clamp(scd->tick_gtod + delta,
-         *                    max(scd->tick_gtod, scd->clock),
-         *                    scd->tick_gtod + TICK_NSEC);
+         *            max(scd->tick_gtod, scd->clock),
+         *            scd->tick_gtod + TICK_NSEC);
          */

         clock = scd->tick_gtod + delta;
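The reflowed comment above describes the clamp applied a few lines later: the updated clock is kept between max(scd->tick_gtod, scd->clock), so it never goes backwards, and scd->tick_gtod + TICK_NSEC, so it never runs more than one tick ahead, using min/max helpers that tolerate u64 wrap-around. The sketch below works that formula through with made-up values and an assumed TICK_NSEC for HZ=1000; it is an illustration of the formula, not the kernel's code.

#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC 1000000ULL    /* assumed: one tick = 1 ms */

static inline uint64_t wrap_min(uint64_t x, uint64_t y)
{
        return (int64_t)(x - y) < 0 ? x : y;    /* "x < y" modulo 2^64 */
}

static inline uint64_t wrap_max(uint64_t x, uint64_t y)
{
        return (int64_t)(x - y) > 0 ? x : y;    /* "x > y" modulo 2^64 */
}

int main(void)
{
        uint64_t tick_gtod = 5000000, old_clock = 5300000, delta = 2000000;

        uint64_t clock     = tick_gtod + delta;
        uint64_t min_clock = wrap_max(tick_gtod, old_clock);   /* never go backwards */
        uint64_t max_clock = tick_gtod + TICK_NSEC;             /* never run ahead of one tick */

        clock = wrap_max(clock, min_clock);
        clock = wrap_min(clock, max_clock);

        printf("clamped clock = %llu\n", (unsigned long long)clock);   /* prints 6000000 */
        return 0;
}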
@@ -149,8 +156,11 @@ static void lock_double_clock(struct sched_clock_data *data1,

 u64 sched_clock_cpu(int cpu)
 {
-        struct sched_clock_data *scd = cpu_sdc(cpu);
         u64 now, clock, this_clock, remote_clock;
+        struct sched_clock_data *scd;
+
+        if (sched_clock_stable)
+                return sched_clock();

         /*
          * Normally this is not called in NMI context - but if it is,
@@ -162,6 +172,7 @@ u64 sched_clock_cpu(int cpu)
         if (unlikely(!sched_clock_running))
                 return 0ull;

+        scd = cpu_sdc(cpu);
         WARN_ON_ONCE(!irqs_disabled());
         now = sched_clock();
@@ -201,6 +212,8 @@ u64 sched_clock_cpu(int cpu)
         return clock;
 }

+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+
 void sched_clock_tick(void)
 {
         struct sched_clock_data *scd = this_scd();
@@ -243,22 +256,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-
-void sched_clock_init(void)
-{
-        sched_clock_running = 1;
-}
-
-u64 sched_clock_cpu(int cpu)
-{
-        if (unlikely(!sched_clock_running))
-                return 0;
-
-        return sched_clock();
-}
-
-#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

 unsigned long long cpu_clock(int cpu)
 {