openanolis / cloud-kernel
Commit 7c526e1f
Authored Mar 26, 2009 by Ingo Molnar
Merge branches 'timers/new-apis', 'timers/ntp' and 'timers/urgent' into timers/core
Parents: e8684605 74019224 a2a5ac86 37bebc70

Showing 8 changed files with 357 additions and 234 deletions (+357, -234)
arch/powerpc/platforms/cell/spufs/sched.c      +1    -1
drivers/infiniband/hw/ipath/ipath_driver.c     +3    -3
include/linux/timer.h                          +2   -20
include/linux/timex.h                          +1    -1
kernel/posix-cpu-timers.c                      +2    -1
kernel/relay.c                                 +1    -1
kernel/time/ntp.c                            +274  -170
kernel/timer.c                                +73   -37
arch/powerpc/platforms/cell/spufs/sched.c

@@ -508,7 +508,7 @@ static void __spu_add_to_rq(struct spu_context *ctx)
 		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
 		set_bit(ctx->prio, spu_prio->bitmap);
 		if (!spu_prio->nr_waiting++)
-			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+			mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
 	}
 }
drivers/infiniband/hw/ipath/ipath_driver.c

@@ -2715,7 +2715,7 @@ static void ipath_hol_signal_up(struct ipath_devdata *dd)
  * to prevent HoL blocking, then start the HoL timer that
  * periodically continues, then stop procs, so they can detect
  * link down if they want, and do something about it.
- * Timer may already be running, so use __mod_timer, not add_timer.
+ * Timer may already be running, so use mod_timer, not add_timer.
  */
 void ipath_hol_down(struct ipath_devdata *dd)
 {
@@ -2724,7 +2724,7 @@ void ipath_hol_down(struct ipath_devdata *dd)
 	dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
 	dd->ipath_hol_timer.expires = jiffies +
 		msecs_to_jiffies(ipath_hol_timeout_ms);
-	__mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
+	mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
 }

 /*
@@ -2763,7 +2763,7 @@ void ipath_hol_event(unsigned long opaque)
 	else {
 		dd->ipath_hol_timer.expires = jiffies +
 			msecs_to_jiffies(ipath_hol_timeout_ms);
-		__mod_timer(&dd->ipath_hol_timer,
+		mod_timer(&dd->ipath_hol_timer,
 			dd->ipath_hol_timer.expires);
 	}
 }
include/linux/timer.h

@@ -86,8 +86,8 @@ static inline int timer_pending(const struct timer_list * timer)
 extern void add_timer_on(struct timer_list *timer, int cpu);
 extern int del_timer(struct timer_list * timer);
-extern int __mod_timer(struct timer_list *timer, unsigned long expires);
 extern int mod_timer(struct timer_list *timer, unsigned long expires);
+extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);

 /*
  * The jiffies value which is added to now, when there is no timer
@@ -146,25 +146,7 @@ static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
 }
 #endif

-/**
- * add_timer - start a timer
- * @timer: the timer to be added
- *
- * The kernel will do a ->function(->data) callback from the
- * timer interrupt at the ->expires point in the future. The
- * current time is 'jiffies'.
- *
- * The timer's ->expires, ->function (and if the handler uses it, ->data)
- * fields must be set prior calling this function.
- *
- * Timers with an ->expires field in the past will be executed in the next
- * timer tick.
- */
-static inline void add_timer(struct timer_list *timer)
-{
-	BUG_ON(timer_pending(timer));
-	__mod_timer(timer, timer->expires);
-}
+extern void add_timer(struct timer_list *timer);

 #ifdef CONFIG_SMP
 extern int try_to_del_timer_sync(struct timer_list *timer);
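For orientation, a minimal caller-side sketch of the API surface declared above after this merge. The timer, callback and timeouts (demo_timer, demo_timeout, HZ-based expiries) are hypothetical and not part of the commit; only setup_timer(), add_timer(), mod_timer() and the newly exported mod_timer_pending() come from this header generation.

/* Hypothetical driver-side use of the reshuffled timer API (sketch only). */
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list demo_timer;			/* hypothetical timer */

static void demo_timeout(unsigned long data)		/* (data)-style callback of this era */
{
	/* timer expired: do the deferred work here */
}

static void demo_start(void)
{
	setup_timer(&demo_timer, demo_timeout, 0);
	demo_timer.expires = jiffies + HZ;
	add_timer(&demo_timer);				/* now out of line, see kernel/timer.c */
}

static void demo_kick(void)
{
	/* (re)arm unconditionally: activates the timer even if it was not pending */
	mod_timer(&demo_timer, jiffies + HZ);

	/* only push out a still-pending timer; a deleted timer stays deleted */
	mod_timer_pending(&demo_timer, jiffies + 2 * HZ);
}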
include/linux/timex.h

@@ -190,7 +190,7 @@ struct timex {
  * offset and maximum frequency tolerance.
  */
 #define SHIFT_USEC 16		/* frequency offset scale (shift) */
-#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
+#define PPM_SCALE ((s64)NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
 #define PPM_SCALE_INV_SHIFT 19
 #define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
 		       PPM_SCALE + 1)
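The (s64) cast matters because PPM_SCALE is multiplied by the adjtimex frequency word in 64-bit NTP math; process_adjtimex_modes() in kernel/time/ntp.c below drops its own cast and relies on it. A standalone user-space sketch of the promotion, assuming this kernel's values NSEC_PER_USEC = 1000, NTP_SCALE_SHIFT = 32 and SHIFT_USEC = 16; the sample frequency is made up:

/* Sketch only: why PPM_SCALE wants a 64-bit type on 32-bit longs. */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC	1000L	/* assumed kernel value */
#define NTP_SCALE_SHIFT	32	/* assumed kernel value */
#define SHIFT_USEC	16	/* from the hunk above */

#define PPM_SCALE_LONG	(NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))	/* old macro shape */
#define PPM_SCALE_S64	((int64_t)NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))	/* new macro shape */

int main(void)
{
	/* 100 ppm expressed in adjtimex scaled-ppm units (ppm << 16) */
	long freq = 100L << SHIFT_USEC;

	/* On a 32-bit long, freq * PPM_SCALE_LONG would wrap; the s64 cast
	 * keeps the whole product in 64-bit arithmetic. */
	printf("PPM_SCALE        = %ld\n", (long)PPM_SCALE_LONG);
	printf("freq * PPM_SCALE = %lld\n", (long long)(freq * PPM_SCALE_S64));
	return 0;
}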
kernel/posix-cpu-timers.c

@@ -1370,7 +1370,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
 			return 1;
 	}
-	return 0;
+
+	return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
 }

 /*
kernel/relay.c

@@ -750,7 +750,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
 			 * from the scheduler (trying to re-grab
 			 * rq->lock), so defer it.
 			 */
-			__mod_timer(&buf->timer, jiffies + 1);
+			mod_timer(&buf->timer, jiffies + 1);
 		}

 		old = buf->data;
kernel/time/ntp.c

 /*
  * linux/kernel/time/ntp.c
  *
  * NTP state machine interfaces and logic.
  *
  * This code was mainly moved from kernel/timer.c and kernel/time.c
  * Please see those files for relevant copyright info and historical
  * changelogs.
  */
-#include <linux/mm.h>
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <linux/jiffies.h>
-#include <linux/hrtimer.h>
 #include <linux/capability.h>
-#include <linux/math64.h>
 #include <linux/clocksource.h>
 #include <linux/workqueue.h>
-#include <asm/timex.h>
+#include <linux/hrtimer.h>
+#include <linux/jiffies.h>
+#include <linux/math64.h>
+#include <linux/timex.h>
+#include <linux/time.h>
+#include <linux/mm.h>

 /*
- * Timekeeping variables
+ * NTP timekeeping variables:
  */
-unsigned long tick_usec = TICK_USEC;	/* USER_HZ period (usec) */
-unsigned long tick_nsec;		/* ACTHZ period (nsec) */
-u64 tick_length;
-static u64 tick_length_base;
-
-static struct hrtimer leap_timer;

-#define MAX_TICKADJ		500	/* microsecs */
-#define MAX_TICKADJ_SCALED	(((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \
-				  NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
+/* USER_HZ period (usecs): */
+unsigned long			tick_usec = TICK_USEC;
+
+/* ACTHZ period (nsecs): */
+unsigned long			tick_nsec;
+
+u64				tick_length;
+static u64			tick_length_base;
+
+static struct hrtimer		leap_timer;
+
+#define MAX_TICKADJ		500LL		/* usecs */
+#define MAX_TICKADJ_SCALED \
+	(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)

 /*
  * phase-lock loop variables
  */
-/* TIME_ERROR prevents overwriting the CMOS clock */
-static int time_state = TIME_OK;	/* clock synchronization status	*/
-int time_status = STA_UNSYNC;		/* clock status bits		*/
-static long time_tai;			/* TAI offset (s)		*/
-static s64 time_offset;			/* time adjustment (ns)		*/
-static long time_constant = 2;		/* pll time constant		*/
-long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
-long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
-static s64 time_freq;			/* frequency offset (scaled ns/s)*/
-static long time_reftime;		/* time at last adjustment (s)	*/
-long time_adjust;
-static long ntp_tick_adj;
+
+/*
+ * clock synchronization status
+ *
+ * (TIME_ERROR prevents overwriting the CMOS clock)
+ */
+static int			time_state = TIME_OK;
+
+/* clock status bits:							*/
+int				time_status = STA_UNSYNC;
+
+/* TAI offset (secs):							*/
+static long			time_tai;
+
+/* time adjustment (nsecs):						*/
+static s64			time_offset;
+
+/* pll time constant:							*/
+static long			time_constant = 2;
+
+/* maximum error (usecs):						*/
+long				time_maxerror = NTP_PHASE_LIMIT;
+
+/* estimated error (usecs):						*/
+long				time_esterror = NTP_PHASE_LIMIT;
+
+/* frequency offset (scaled nsecs/secs):				*/
+static s64			time_freq;
+
+/* time at last adjustment (secs):					*/
+static long			time_reftime;
+
+long				time_adjust;
+
+/* constant (boot-param configurable) NTP tick adjustment (upscaled)	*/
+static s64			ntp_tick_adj;

+/*
+ * NTP methods:
+ */
+
+/*
+ * Update (tick_length, tick_length_base, tick_nsec), based
+ * on (tick_usec, ntp_tick_adj, time_freq):
+ */
 static void ntp_update_frequency(void)
 {
-	u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
-				<< NTP_SCALE_SHIFT;
-	second_length += (s64)ntp_tick_adj << NTP_SCALE_SHIFT;
-	second_length += time_freq;
+	u64 second_length;
+	u64 new_base;

-	tick_length_base = second_length;
+	second_length		 = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
+						<< NTP_SCALE_SHIFT;

-	tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
-	tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ);
+	second_length		+= ntp_tick_adj;
+	second_length		+= time_freq;
+
+	tick_nsec		 = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
+	new_base		 = div_u64(second_length, NTP_INTERVAL_FREQ);
+
+	/*
+	 * Don't wait for the next second_overflow, apply
+	 * the change to the tick length immediately:
+	 */
+	tick_length		+= new_base - tick_length_base;
+	tick_length_base	 = new_base;
+}
+
+static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
+{
+	time_status &= ~STA_MODE;
+
+	if (secs < MINSEC)
+		return 0;
+
+	if (!(time_status & STA_FLL) && (secs <= MAXSEC))
+		return 0;
+
+	time_status |= STA_MODE;
+
+	return div_s64(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
 }

 static void ntp_update_offset(long offset)
 {
-	long mtemp;
 	s64 freq_adj;
+	s64 offset64;
+	long secs;

 	if (!(time_status & STA_PLL))
 		return;

@@ -84,24 +142,23 @@ static void ntp_update_offset(long offset)
 	 * Select how the frequency is to be controlled
 	 * and in which mode (PLL or FLL).
 	 */
-	if (time_status & STA_FREQHOLD || time_reftime == 0)
-		time_reftime = xtime.tv_sec;
-	mtemp = xtime.tv_sec - time_reftime;
+	secs = xtime.tv_sec - time_reftime;
+	if (unlikely(time_status & STA_FREQHOLD))
+		secs = 0;
+
 	time_reftime = xtime.tv_sec;

-	freq_adj = (s64)offset * mtemp;
-	freq_adj <<= NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant);
-	time_status &= ~STA_MODE;
-	if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
-		freq_adj += div_s64((s64)offset << (NTP_SCALE_SHIFT - SHIFT_FLL),
-				    mtemp);
-		time_status |= STA_MODE;
-	}
-	freq_adj += time_freq;
-	freq_adj = min(freq_adj, MAXFREQ_SCALED);
-	time_freq = max(freq_adj, -MAXFREQ_SCALED);
+	offset64    = offset;
+	freq_adj    = (offset64 * secs) <<
+			(NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));

-	time_offset = div_s64((s64)offset << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
+	freq_adj    += ntp_update_offset_fll(offset64, secs);
+
+	freq_adj    = min(freq_adj + time_freq, MAXFREQ_SCALED);
+
+	time_freq   = max(freq_adj, -MAXFREQ_SCALED);
+
+	time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
 }

 /**
@@ -111,15 +168,15 @@ static void ntp_update_offset(long offset)
  */
 void ntp_clear(void)
 {
-	time_adjust = 0;		/* stop active adjtime() */
-	time_status |= STA_UNSYNC;
-	time_maxerror = NTP_PHASE_LIMIT;
-	time_esterror = NTP_PHASE_LIMIT;
+	time_adjust	= 0;		/* stop active adjtime() */
+	time_status	|= STA_UNSYNC;
+	time_maxerror	= NTP_PHASE_LIMIT;
+	time_esterror	= NTP_PHASE_LIMIT;

 	ntp_update_frequency();

-	tick_length = tick_length_base;
-	time_offset = 0;
+	tick_length	= tick_length_base;
+	time_offset	= 0;
 }

 /*
@@ -140,8 +197,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 		xtime.tv_sec--;
 		wall_to_monotonic.tv_sec++;
 		time_state = TIME_OOP;
-		printk(KERN_NOTICE "Clock: "
-		       "inserting leap second 23:59:60 UTC\n");
+		printk(KERN_NOTICE
+			"Clock: inserting leap second 23:59:60 UTC\n");
 		hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
 		res = HRTIMER_RESTART;
 		break;
@@ -150,8 +207,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 		time_tai--;
 		wall_to_monotonic.tv_sec--;
 		time_state = TIME_WAIT;
-		printk(KERN_NOTICE "Clock: "
-		       "deleting leap second 23:59:59 UTC\n");
+		printk(KERN_NOTICE
+			"Clock: deleting leap second 23:59:59 UTC\n");
 		break;
 	case TIME_OOP:
 		time_tai++;
@@ -179,7 +236,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
  */
 void second_overflow(void)
 {
-	s64 time_adj;
+	s64 delta;

 	/* Bump the maxerror field */
 	time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -192,24 +249,30 @@ void second_overflow(void)
 	 * Compute the phase adjustment for the next second. The offset is
 	 * reduced by a fixed factor times the time constant.
 	 */
-	tick_length = tick_length_base;
-	time_adj = shift_right(time_offset, SHIFT_PLL + time_constant);
-	time_offset -= time_adj;
-	tick_length += time_adj;
-
-	if (unlikely(time_adjust)) {
-		if (time_adjust > MAX_TICKADJ) {
-			time_adjust -= MAX_TICKADJ;
-			tick_length += MAX_TICKADJ_SCALED;
-		} else if (time_adjust < -MAX_TICKADJ) {
-			time_adjust += MAX_TICKADJ;
-			tick_length -= MAX_TICKADJ_SCALED;
-		} else {
-			tick_length += (s64)(time_adjust * NSEC_PER_USEC /
-					NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT;
-			time_adjust = 0;
-		}
-	}
+	tick_length	 = tick_length_base;
+	delta		 = shift_right(time_offset, SHIFT_PLL + time_constant);
+	time_offset	-= delta;
+	tick_length	+= delta;
+
+	if (!time_adjust)
+		return;
+
+	if (time_adjust > MAX_TICKADJ) {
+		time_adjust -= MAX_TICKADJ;
+		tick_length += MAX_TICKADJ_SCALED;
+		return;
+	}
+
+	if (time_adjust < -MAX_TICKADJ) {
+		time_adjust += MAX_TICKADJ;
+		tick_length -= MAX_TICKADJ_SCALED;
+		return;
+	}
+
+	tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
+							<< NTP_SCALE_SHIFT;
+	time_adjust = 0;
 }

 #ifdef CONFIG_GENERIC_CMOS_UPDATE
@@ -233,12 +296,13 @@ static void sync_cmos_clock(struct work_struct *work)
 	 * This code is run on a timer.  If the clock is set, that timer
 	 * may not expire at the correct time.  Thus, we adjust...
 	 */
-	if (!ntp_synced())
+	if (!ntp_synced()) {
 		/*
 		 * Not synced, exit, do not restart a timer (if one is
 		 * running, let it run out).
 		 */
 		return;
+	}

 	getnstimeofday(&now);
 	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
@@ -270,7 +334,116 @@ static void notify_cmos_timer(void)
 static inline void notify_cmos_timer(void) { }
 #endif

-/* adjtimex mainly allows reading (and writing, if superuser) of
+/*
+ * Start the leap seconds timer:
+ */
+static inline void ntp_start_leap_timer(struct timespec *ts)
+{
+	long now = ts->tv_sec;
+
+	if (time_status & STA_INS) {
+		time_state = TIME_INS;
+		now += 86400 - now % 86400;
+		hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+
+		return;
+	}
+
+	if (time_status & STA_DEL) {
+		time_state = TIME_DEL;
+		now += 86400 - (now + 1) % 86400;
+		hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+	}
+}
+
+/*
+ * Propagate a new txc->status value into the NTP state:
+ */
+static inline void process_adj_status(struct timex *txc, struct timespec *ts)
+{
+	if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
+		time_state = TIME_OK;
+		time_status = STA_UNSYNC;
+	}
+
+	/*
+	 * If we turn on PLL adjustments then reset the
+	 * reference time to current time.
+	 */
+	if (!(time_status & STA_PLL) && (txc->status & STA_PLL))
+		time_reftime = xtime.tv_sec;
+
+	/* only set allowed bits */
+	time_status &= STA_RONLY;
+	time_status |= txc->status & ~STA_RONLY;
+
+	switch (time_state) {
+	case TIME_OK:
+		ntp_start_leap_timer(ts);
+		break;
+	case TIME_INS:
+	case TIME_DEL:
+		time_state = TIME_OK;
+		ntp_start_leap_timer(ts);
+	case TIME_WAIT:
+		if (!(time_status & (STA_INS | STA_DEL)))
+			time_state = TIME_OK;
+		break;
+	case TIME_OOP:
+		hrtimer_restart(&leap_timer);
+		break;
+	}
+}
+
+/*
+ * Called with the xtime lock held, so we can access and modify
+ * all the global NTP state:
+ */
+static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts)
+{
+	if (txc->modes & ADJ_STATUS)
+		process_adj_status(txc, ts);
+
+	if (txc->modes & ADJ_NANO)
+		time_status |= STA_NANO;
+
+	if (txc->modes & ADJ_MICRO)
+		time_status &= ~STA_NANO;
+
+	if (txc->modes & ADJ_FREQUENCY) {
+		time_freq = txc->freq * PPM_SCALE;
+		time_freq = min(time_freq, MAXFREQ_SCALED);
+		time_freq = max(time_freq, -MAXFREQ_SCALED);
+	}
+
+	if (txc->modes & ADJ_MAXERROR)
+		time_maxerror = txc->maxerror;
+
+	if (txc->modes & ADJ_ESTERROR)
+		time_esterror = txc->esterror;
+
+	if (txc->modes & ADJ_TIMECONST) {
+		time_constant = txc->constant;
+		if (!(time_status & STA_NANO))
+			time_constant += 4;
+		time_constant = min(time_constant, (long)MAXTC);
+		time_constant = max(time_constant, 0l);
+	}
+
+	if (txc->modes & ADJ_TAI && txc->constant > 0)
+		time_tai = txc->constant;
+
+	if (txc->modes & ADJ_OFFSET)
+		ntp_update_offset(txc->offset);
+
+	if (txc->modes & ADJ_TICK)
+		tick_usec = txc->tick;
+
+	if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
+		ntp_update_frequency();
+}
+
+/*
+ * adjtimex mainly allows reading (and writing, if superuser) of
  * kernel time-keeping variables. used by xntpd.
  */
 int do_adjtimex(struct timex *txc)
@@ -291,11 +464,14 @@ int do_adjtimex(struct timex *txc)
 	if (txc->modes && !capable(CAP_SYS_TIME))
 		return -EPERM;

-	/* if the quartz is off by more than 10% something is VERY wrong! */
+	/*
+	 * if the quartz is off by more than 10% then
+	 * something is VERY wrong!
+	 */
 	if (txc->modes & ADJ_TICK &&
 	    (txc->tick <  900000/USER_HZ ||
 	     txc->tick > 1100000/USER_HZ))
-		return -EINVAL;
+			return -EINVAL;

 	if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
 		hrtimer_cancel(&leap_timer);
@@ -305,7 +481,6 @@ int do_adjtimex(struct timex *txc)
 	write_seqlock_irq(&xtime_lock);

-	/* If there are input parameters, then process them */
 	if (txc->modes & ADJ_ADJTIME) {
 		long save_adjust = time_adjust;
@@ -315,98 +490,24 @@ int do_adjtimex(struct timex *txc)
 			ntp_update_frequency();
 		}
 		txc->offset = save_adjust;
-		goto adj_done;
-	}
-	if (txc->modes) {
-		long sec;
-
-		if (txc->modes & ADJ_STATUS) {
-			if ((time_status & STA_PLL) &&
-			    !(txc->status & STA_PLL)) {
-				time_state = TIME_OK;
-				time_status = STA_UNSYNC;
-			}
-			/* only set allowed bits */
-			time_status &= STA_RONLY;
-			time_status |= txc->status & ~STA_RONLY;
-
-			switch (time_state) {
-			case TIME_OK:
-			start_timer:
-				sec = ts.tv_sec;
-				if (time_status & STA_INS) {
-					time_state = TIME_INS;
-					sec += 86400 - sec % 86400;
-					hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
-				} else if (time_status & STA_DEL) {
-					time_state = TIME_DEL;
-					sec += 86400 - (sec + 1) % 86400;
-					hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
-				}
-				break;
-			case TIME_INS:
-			case TIME_DEL:
-				time_state = TIME_OK;
-				goto start_timer;
-				break;
-			case TIME_WAIT:
-				if (!(time_status & (STA_INS | STA_DEL)))
-					time_state = TIME_OK;
-				break;
-			case TIME_OOP:
-				hrtimer_restart(&leap_timer);
-				break;
-			}
-		}
-
-		if (txc->modes & ADJ_NANO)
-			time_status |= STA_NANO;
-		if (txc->modes & ADJ_MICRO)
-			time_status &= ~STA_NANO;
-
-		if (txc->modes & ADJ_FREQUENCY) {
-			time_freq = (s64)txc->freq * PPM_SCALE;
-			time_freq = min(time_freq, MAXFREQ_SCALED);
-			time_freq = max(time_freq, -MAXFREQ_SCALED);
-		}
-
-		if (txc->modes & ADJ_MAXERROR)
-			time_maxerror = txc->maxerror;
-		if (txc->modes & ADJ_ESTERROR)
-			time_esterror = txc->esterror;
-
-		if (txc->modes & ADJ_TIMECONST) {
-			time_constant = txc->constant;
-			if (!(time_status & STA_NANO))
-				time_constant += 4;
-			time_constant = min(time_constant, (long)MAXTC);
-			time_constant = max(time_constant, 0l);
-		}
-
-		if (txc->modes & ADJ_TAI && txc->constant > 0)
-			time_tai = txc->constant;
-
-		if (txc->modes & ADJ_OFFSET)
-			ntp_update_offset(txc->offset);
-		if (txc->modes & ADJ_TICK)
-			tick_usec = txc->tick;
-
-		if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
-			ntp_update_frequency();
-	}
-
-	txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
-				  NTP_SCALE_SHIFT);
-	if (!(time_status & STA_NANO))
-		txc->offset /= NSEC_PER_USEC;
-
-adj_done:
+	} else {
+
+		/* If there are input parameters, then process them: */
+		if (txc->modes)
+			process_adjtimex_modes(txc, &ts);
+
+		txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+					  NTP_SCALE_SHIFT);
+		if (!(time_status & STA_NANO))
+			txc->offset /= NSEC_PER_USEC;
+	}
+
 	result = time_state;	/* mostly `TIME_OK' */
 	if (time_status & (STA_UNSYNC|STA_CLOCKERR))
 		result = TIME_ERROR;

 	txc->freq	   = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
-					 (s64)PPM_SCALE_INV, NTP_SCALE_SHIFT);
+					 PPM_SCALE_INV, NTP_SCALE_SHIFT);
 	txc->maxerror	   = time_maxerror;
 	txc->esterror	   = time_esterror;
 	txc->status	   = time_status;
@@ -425,6 +526,7 @@ int do_adjtimex(struct timex *txc)
 	txc->calcnt	   = 0;
 	txc->errcnt	   = 0;
 	txc->stbcnt	   = 0;
+
 	write_sequnlock_irq(&xtime_lock);

 	txc->time.tv_sec = ts.tv_sec;
@@ -440,6 +542,8 @@ int do_adjtimex(struct timex *txc)
 static int __init ntp_tick_adj_setup(char *str)
 {
 	ntp_tick_adj = simple_strtol(str, NULL, 0);
+	ntp_tick_adj <<= NTP_SCALE_SHIFT;
+
 	return 1;
 }
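The leap-second scheduling added in ntp_start_leap_timer() above arms an absolute hrtimer at the next UTC day boundary (one second earlier for a deletion). A standalone sketch of that expiry arithmetic, using an arbitrary example timestamp:

/* Sketch only: the "now += 86400 - now % 86400" expiry math from above. */
#include <stdio.h>

int main(void)
{
	long now = 1230768000L - 3600;			/* 2008-12-31 23:00:00 UTC (arbitrary) */
	long ins = now + (86400 - now % 86400);		/* insert fires at next 00:00:00 UTC  */
	long del = now + (86400 - (now + 1) % 86400);	/* delete fires at next 23:59:59 UTC  */

	printf("insert expiry = %ld, delete expiry = %ld\n", ins, del);
	return 0;
}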
kernel/timer.c

@@ -589,11 +589,14 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 	}
 }

-int __mod_timer(struct timer_list *timer, unsigned long expires)
+static inline int
+__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
 	struct tvec_base *base, *new_base;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
+
+	ret = 0;

 	timer_stats_timer_set_start_info(timer);
 	BUG_ON(!timer->function);
@@ -603,6 +606,9 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer_pending(timer)) {
 		detach_timer(timer, 0);
 		ret = 1;
+	} else {
+		if (pending_only)
+			goto out_unlock;
 	}

 	debug_timer_activate(timer);
@@ -629,42 +635,28 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 	timer->expires = expires;
 	internal_add_timer(base, timer);

+out_unlock:
 	spin_unlock_irqrestore(&base->lock, flags);

 	return ret;
 }

-EXPORT_SYMBOL(__mod_timer);
-
 /**
- * add_timer_on - start a timer on a particular CPU
- * @timer: the timer to be added
- * @cpu: the CPU to start it on
+ * mod_timer_pending - modify a pending timer's timeout
+ * @timer: the pending timer to be modified
+ * @expires: new timeout in jiffies
  *
- * This is not very scalable on SMP. Double adds are not possible.
+ * mod_timer_pending() is the same for pending timers as mod_timer(),
+ * but will not re-activate and modify already deleted timers.
+ *
+ * It is useful for unserialized use of timers.
  */
-void add_timer_on(struct timer_list *timer, int cpu)
+int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
-	struct tvec_base *base = per_cpu(tvec_bases, cpu);
-	unsigned long flags;
-
-	timer_stats_timer_set_start_info(timer);
-	BUG_ON(timer_pending(timer) || !timer->function);
-	spin_lock_irqsave(&base->lock, flags);
-	timer_set_base(timer, base);
-	debug_timer_activate(timer);
-	internal_add_timer(base, timer);
-	/*
-	 * Check whether the other CPU is idle and needs to be
-	 * triggered to reevaluate the timer wheel when nohz is
-	 * active. We are protected against the other CPU fiddling
-	 * with the timer by holding the timer base lock. This also
-	 * makes sure that a CPU on the way to idle can not evaluate
-	 * the timer wheel.
-	 */
-	wake_up_idle_cpu(cpu);
-	spin_unlock_irqrestore(&base->lock, flags);
+	return __mod_timer(timer, expires, true);
 }
+EXPORT_SYMBOL(mod_timer_pending);

 /**
  * mod_timer - modify a timer's timeout
@@ -688,9 +680,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
-	BUG_ON(!timer->function);
-
-	timer_stats_timer_set_start_info(timer);
 	/*
 	 * This is a common optimization triggered by the
 	 * networking code - if the timer is re-modified
@@ -699,11 +688,61 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer->expires == expires && timer_pending(timer))
 		return 1;

-	return __mod_timer(timer, expires);
+	return __mod_timer(timer, expires, false);
 }
-
 EXPORT_SYMBOL(mod_timer);

 /**
+ * add_timer - start a timer
+ * @timer: the timer to be added
+ *
+ * The kernel will do a ->function(->data) callback from the
+ * timer interrupt at the ->expires point in the future. The
+ * current time is 'jiffies'.
+ *
+ * The timer's ->expires, ->function (and if the handler uses it, ->data)
+ * fields must be set prior calling this function.
+ *
+ * Timers with an ->expires field in the past will be executed in the next
+ * timer tick.
+ */
+void add_timer(struct timer_list *timer)
+{
+	BUG_ON(timer_pending(timer));
+	mod_timer(timer, timer->expires);
+}
+EXPORT_SYMBOL(add_timer);
+
+/**
+ * add_timer_on - start a timer on a particular CPU
+ * @timer: the timer to be added
+ * @cpu: the CPU to start it on
+ *
+ * This is not very scalable on SMP. Double adds are not possible.
+ */
+void add_timer_on(struct timer_list *timer, int cpu)
+{
+	struct tvec_base *base = per_cpu(tvec_bases, cpu);
+	unsigned long flags;
+
+	timer_stats_timer_set_start_info(timer);
+	BUG_ON(timer_pending(timer) || !timer->function);
+	spin_lock_irqsave(&base->lock, flags);
+	timer_set_base(timer, base);
+	debug_timer_activate(timer);
+	internal_add_timer(base, timer);
+	/*
+	 * Check whether the other CPU is idle and needs to be
+	 * triggered to reevaluate the timer wheel when nohz is
+	 * active. We are protected against the other CPU fiddling
+	 * with the timer by holding the timer base lock. This also
+	 * makes sure that a CPU on the way to idle can not evaluate
+	 * the timer wheel.
+	 */
+	wake_up_idle_cpu(cpu);
+	spin_unlock_irqrestore(&base->lock, flags);
+}
+
+/**
  * del_timer - deactive a timer.
  * @timer: the timer to be deactivated
@@ -733,7 +772,6 @@ int del_timer(struct timer_list *timer)
 	return ret;
 }
-
 EXPORT_SYMBOL(del_timer);

 #ifdef CONFIG_SMP
@@ -767,7 +805,6 @@ int try_to_del_timer_sync(struct timer_list *timer)
 	return ret;
 }
-
 EXPORT_SYMBOL(try_to_del_timer_sync);

 /**
@@ -796,7 +833,6 @@ int del_timer_sync(struct timer_list *timer)
 		cpu_relax();
 	}
 }
-
 EXPORT_SYMBOL(del_timer_sync);

 #endif
@@ -1268,7 +1304,7 @@ signed long __sched schedule_timeout(signed long timeout)
 	expire = timeout + jiffies;

 	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-	__mod_timer(&timer, expire);
+	__mod_timer(&timer, expire, false);
 	schedule();
 	del_singleshot_timer_sync(&timer);
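A short, hypothetical driver-side sketch of the semantic difference the new pending_only argument carries; demo_teardown_race() and its timeout are illustrative only, but the exported calls it uses are the ones shown in the hunks above:

/* Sketch only: mod_timer() re-activates, mod_timer_pending() does not. */
#include <linux/timer.h>
#include <linux/jiffies.h>

static void demo_teardown_race(struct timer_list *demo_timer)
{
	del_timer_sync(demo_timer);

	/* A racing, unserialized path doing this would re-arm the timer: */
	mod_timer(demo_timer, jiffies + HZ);

	/* ...whereas this returns 0 and leaves a deleted timer dead, which is
	 * what __mod_timer(..., true) enforces via pending_only: */
	mod_timer_pending(demo_timer, jiffies + HZ);
}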