Commit b62ad9ab authored by Linus Torvalds

Merge branch 'timers-timekeeping-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'timers-timekeeping-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  um: Fix read_persistent_clock fallout
  kgdb: Do not access xtime directly
  powerpc: Clean up obsolete code relating to decrementer and timebase
  powerpc: Rework VDSO gettimeofday to prevent time going backwards
  clocksource: Add __clocksource_updatefreq_hz/khz methods
  x86: Convert common clocksources to use clocksource_register_hz/khz
  timekeeping: Make xtime and wall_to_monotonic static
  hrtimer: Cleanup direct access to wall_to_monotonic
  um: Convert to use read_persistent_clock
  timkeeping: Fix update_vsyscall to provide wall_to_monotonic offset
  powerpc: Cleanup xtime usage
  powerpc: Simplify update_vsyscall
  time: Kill off CONFIG_GENERIC_TIME
  time: Implement timespec_add
  x86: Fix vtime/file timestamp inconsistencies

Trivial conflicts in Documentation/feature-removal-schedule.txt

Much less trivial conflicts in arch/powerpc/kernel/time.c resolved as
per Thomas' earlier merge commit 47916be4 ("Merge branch
'powerpc.cherry-picks' into timers/clocksource")
......@@ -445,16 +445,6 @@ Who: Jan Kiszka <jan.kiszka@web.de>
----------------------------
What: xtime, wall_to_monotonic
When: 2.6.36+
Files: kernel/time/timekeeping.c include/linux/time.h
Why: Cleaning up timekeeping internal values. Please use
existing timekeeping accessor functions to access
the equivalent functionality.
Who: John Stultz <johnstul@us.ibm.com>
----------------------------
What: KVM paravirt mmu host support
When: January 2011
Why: The paravirt mmu host support is slower than non-paravirt mmu, both
......
......@@ -73,7 +73,6 @@ parameter is applicable:
MTD MTD (Memory Technology Device) support is enabled.
NET Appropriate network support is enabled.
NUMA NUMA support is enabled.
GENERIC_TIME The generic timeofday code is enabled.
NFS Appropriate NFS support is enabled.
OSS OSS sound support is enabled.
PV_OPS A paravirtualized kernel is enabled.
......@@ -470,7 +469,7 @@ and is between 256 and 4096 characters. It is defined in the file
clocksource is not available, it defaults to PIT.
Format: { pit | tsc | cyclone | pmtmr }
clocksource= [GENERIC_TIME] Override the default clocksource
clocksource= Override the default clocksource
Format: <string>
Override the default clocksource and use the clocksource
with the name specified.
......
......@@ -47,10 +47,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
config GENERIC_TIME
bool
default y
config GENERIC_CMOS_UPDATE
def_bool y
......
......@@ -43,10 +43,6 @@ config SYS_SUPPORTS_APM_EMULATION
config GENERIC_GPIO
bool
config GENERIC_TIME
bool
default y
config ARCH_USES_GETTIMEOFFSET
bool
default n
......
......@@ -45,9 +45,6 @@ config GENERIC_IRQ_PROBE
config RWSEM_GENERIC_SPINLOCK
def_bool y
config GENERIC_TIME
def_bool y
config GENERIC_CLOCKEVENTS
def_bool y
......
......@@ -614,9 +614,6 @@ comment "Kernel Timer/Scheduler"
source kernel/Kconfig.hz
config GENERIC_TIME
def_bool y
config GENERIC_CLOCKEVENTS
bool "Generic clock events"
default y
......
......@@ -20,9 +20,6 @@ config RWSEM_GENERIC_SPINLOCK
config RWSEM_XCHGADD_ALGORITHM
bool
config GENERIC_TIME
def_bool y
config GENERIC_CMOS_UPDATE
def_bool y
......
......@@ -40,10 +40,6 @@ config GENERIC_HARDIRQS_NO__DO_IRQ
bool
default y
config GENERIC_TIME
bool
default y
config TIME_LOW_RES
bool
default y
......
......@@ -62,10 +62,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
config GENERIC_TIME
bool
default y
config GENERIC_BUG
bool
depends on BUG
......
......@@ -82,10 +82,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
config GENERIC_TIME
bool
default y
config GENERIC_TIME_VSYSCALL
bool
default y
......
......@@ -471,7 +471,8 @@ void update_vsyscall_tz(void)
{
}
void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
void update_vsyscall(struct timespec *wall, struct timespec *wtm,
struct clocksource *c, u32 mult)
{
unsigned long flags;
......@@ -487,9 +488,9 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
/* copy kernel time structures */
fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
+ wall->tv_sec;
fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
+ wall->tv_nsec;
/* normalize */
......
......@@ -44,9 +44,6 @@ config HZ
int
default 100
config GENERIC_TIME
def_bool y
config ARCH_USES_GETTIMEOFFSET
def_bool y
......
......@@ -59,9 +59,6 @@ config HZ
int
default 100
config GENERIC_TIME
def_bool y
config ARCH_USES_GETTIMEOFFSET
def_bool y
......
......@@ -63,10 +63,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
config GENERIC_TIME
bool
default y
config GENERIC_CMOS_UPDATE
bool
default y
......
......@@ -51,9 +51,6 @@ config GENERIC_IRQ_PROBE
config GENERIC_CALIBRATE_DELAY
def_bool y
config GENERIC_TIME
def_bool y
config GENERIC_TIME_VSYSCALL
def_bool n
......
......@@ -758,10 +758,6 @@ config GENERIC_CLOCKEVENTS
bool
default y
config GENERIC_TIME
bool
default y
config GENERIC_CMOS_UPDATE
bool
default y
......
......@@ -46,9 +46,6 @@ config GENERIC_FIND_NEXT_BIT
config GENERIC_HWEIGHT
def_bool y
config GENERIC_TIME
def_bool y
config GENERIC_BUG
def_bool y
......
......@@ -66,10 +66,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
config GENERIC_TIME
bool
default y
config TIME_LOW_RES
bool
depends on SMP
......
......@@ -29,9 +29,6 @@ config MMU
config GENERIC_CMOS_UPDATE
def_bool y
config GENERIC_TIME
def_bool y
config GENERIC_TIME_VSYSCALL
def_bool y
......
......@@ -796,10 +796,30 @@ static cycle_t timebase_read(struct clocksource *cs)
return (cycle_t)get_tb();
}
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
u64 new_tb_to_xs, struct timespec *now,
u32 frac_sec)
void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
struct clocksource *clock, u32 mult)
{
u64 new_tb_to_xs, new_stamp_xsec;
u32 frac_sec;
if (clock != &clocksource_timebase)
return;
/* Make userspace gettimeofday spin until we're done. */
++vdso_data->tb_update_count;
smp_mb();
/* XXX this assumes clock->shift == 22 */
/* 4611686018 ~= 2^(20+64-22) / 1e9 */
new_tb_to_xs = (u64) mult * 4611686018ULL;
new_stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
do_div(new_stamp_xsec, 1000000000);
new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
/* this is tv_nsec / 1e9 as a 0.32 fraction */
frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
/*
* tb_update_count is used to allow the userspace gettimeofday code
* to assure itself that it sees a consistent view of the tb_to_xs and
......@@ -811,43 +831,17 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
* We expect the caller to have done the first increment of
* vdso_data->tb_update_count already.
*/
vdso_data->tb_orig_stamp = new_tb_stamp;
vdso_data->tb_orig_stamp = clock->cycle_last;
vdso_data->stamp_xsec = new_stamp_xsec;
vdso_data->tb_to_xs = new_tb_to_xs;
vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
vdso_data->stamp_xtime = *now;
vdso_data->wtom_clock_sec = wtm->tv_sec;
vdso_data->wtom_clock_nsec = wtm->tv_nsec;
vdso_data->stamp_xtime = *wall_time;
vdso_data->stamp_sec_fraction = frac_sec;
smp_wmb();
++(vdso_data->tb_update_count);
}
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
u32 mult)
{
u64 t2x, stamp_xsec;
u32 frac_sec;
if (clock != &clocksource_timebase)
return;
/* Make userspace gettimeofday spin until we're done. */
++vdso_data->tb_update_count;
smp_mb();
/* XXX this assumes clock->shift == 22 */
/* 4611686018 ~= 2^(20+64-22) / 1e9 */
t2x = (u64) mult * 4611686018ULL;
stamp_xsec = (u64) wall_time->tv_nsec * XSEC_PER_SEC;
do_div(stamp_xsec, 1000000000);
stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
/* this is tv_nsec / 1e9 as a 0.32 fraction */
frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
update_gtod(clock->cycle_last, stamp_xsec, t2x, wall_time, frac_sec);
}
void update_vsyscall_tz(void)
{
/* Make userspace gettimeofday spin until we're done. */
......
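For reference, the two magic constants in the reworked powerpc update_vsyscall() follow directly from its comments: 4611686018 ~= 2^(20+64-22)/10^9 (assuming clock->shift == 22, as the XXX comment notes) and 18446744073 ~= 2^64/10^9, which turns tv_nsec into a 0.32 binary fraction once shifted right by 32. A standalone user-space sketch that checks the arithmetic (illustrative only, not part of this diff):

```c
/* Illustrative check of the constants used above; not kernel code. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* tb_to_xs scale, with clock->shift == 22: 2^(20+64-22)/1e9 = 2^62/1e9 */
	long double tb_to_xs = (long double)(1ULL << 62) / 1e9L;

	/* 0.32-fraction scale: 2^64/1e9 */
	long double frac_scale = ((long double)UINT64_MAX + 1.0L) / 1e9L;

	printf("2^62/1e9 ~= %.1Lf (kernel constant 4611686018)\n", tb_to_xs);
	printf("2^64/1e9 ~= %.1Lf (kernel constant 18446744073)\n", frac_scale);

	/* e.g. tv_nsec = 500000000 -> frac_sec just below 2^31, i.e. ~0.5 */
	uint64_t frac_sec = (500000000ULL * 18446744073ULL) >> 32;
	printf("frac_sec(0.5s) = %llu, 2^31 = %llu\n",
	       (unsigned long long)frac_sec, 1ULL << 31);
	return 0;
}
```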
......@@ -40,9 +40,6 @@ config ARCH_HAS_ILOG2_U64
config GENERIC_HWEIGHT
def_bool y
config GENERIC_TIME
def_bool y
config GENERIC_TIME_VSYSCALL
def_bool y
......
......@@ -207,8 +207,8 @@ struct clocksource * __init clocksource_default_clock(void)
return &clocksource_tod;
}
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
u32 mult)
void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
struct clocksource *clock, u32 mult)
{
if (clock != &clocksource_tod)
return;
......@@ -219,8 +219,8 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
vdso_data->xtime_tod_stamp = clock->cycle_last;
vdso_data->xtime_clock_sec = wall_time->tv_sec;
vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
vdso_data->wtom_clock_sec = wtm->tv_sec;
vdso_data->wtom_clock_nsec = wtm->tv_nsec;
vdso_data->ntp_mult = mult;
smp_wmb();
++vdso_data->tb_update_count;
......
......@@ -55,9 +55,6 @@ config GENERIC_CALIBRATE_DELAY
config GENERIC_CLOCKEVENTS
def_bool y
config GENERIC_TIME
def_bool y
config SCHED_NO_NO_OMIT_FRAME_POINTER
def_bool y
......
......@@ -98,9 +98,6 @@ config GENERIC_CALIBRATE_DELAY
config GENERIC_IOMAP
bool
config GENERIC_TIME
def_bool y
config GENERIC_CLOCKEVENTS
def_bool y
......
......@@ -67,9 +67,6 @@ config BITS
default 32 if SPARC32
default 64 if SPARC64
config GENERIC_TIME
def_bool y
config ARCH_USES_GETTIMEOFFSET
bool
default y if SPARC32
......
......@@ -55,10 +55,6 @@ config GENERIC_BUG
default y
depends on BUG
config GENERIC_TIME
bool
default y
config GENERIC_CLOCKEVENTS
bool
default y
......
......@@ -102,16 +102,16 @@ static void __init setup_itimer(void)
clockevents_register_device(&itimer_clockevent);
}
void __init time_init(void)
void read_persistent_clock(struct timespec *ts)
{
long long nsecs;
timer_init();
long long nsecs = os_nsecs();
nsecs = os_nsecs();
set_normalized_timespec(&wall_to_monotonic, -nsecs / NSEC_PER_SEC,
-nsecs % NSEC_PER_SEC);
set_normalized_timespec(&xtime, nsecs / NSEC_PER_SEC,
set_normalized_timespec(ts, nsecs / NSEC_PER_SEC,
nsecs % NSEC_PER_SEC);
}
void __init time_init(void)
{
timer_init();
late_time_init = setup_itimer;
}
......@@ -73,9 +73,6 @@ config ARCH_DEFCONFIG
default "arch/x86/configs/i386_defconfig" if X86_32
default "arch/x86/configs/x86_64_defconfig" if X86_64
config GENERIC_TIME
def_bool y
config GENERIC_CMOS_UPDATE
def_bool y
......@@ -2047,7 +2044,7 @@ config SCx200
config SCx200HR_TIMER
tristate "NatSemi SCx200 27MHz High-Resolution Timer Support"
depends on SCx200 && GENERIC_TIME
depends on SCx200
default y
---help---
This driver provides a clocksource built upon the on-chip
......
......@@ -16,7 +16,6 @@
#include <asm/hpet.h>
#define HPET_MASK CLOCKSOURCE_MASK(32)
#define HPET_SHIFT 22
/* FSEC = 10^-15
NSEC = 10^-9 */
......@@ -787,7 +786,6 @@ static struct clocksource clocksource_hpet = {
.rating = 250,
.read = read_hpet,
.mask = HPET_MASK,
.shift = HPET_SHIFT,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.resume = hpet_resume_counter,
#ifdef CONFIG_X86_64
......@@ -798,6 +796,7 @@ static struct clocksource clocksource_hpet = {
static int hpet_clocksource_register(void)
{
u64 start, now;
u64 hpet_freq;
cycle_t t1;
/* Start the counter */
......@@ -832,9 +831,15 @@ static int hpet_clocksource_register(void)
* mult = (hpet_period * 2^shift)/10^6
* mult = (hpet_period << shift)/FSEC_PER_NSEC
*/
clocksource_hpet.mult = div_sc(hpet_period, FSEC_PER_NSEC, HPET_SHIFT);
clocksource_register(&clocksource_hpet);
/* Need to convert hpet_period (fsec/cyc) to cyc/sec:
*
* cyc/sec = FSEC_PER_SEC/hpet_period(fsec/cyc)
* cyc/sec = (FSEC_PER_NSEC * NSEC_PER_SEC)/hpet_period
*/
hpet_freq = FSEC_PER_NSEC * NSEC_PER_SEC;
do_div(hpet_freq, hpet_period);
clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
return 0;
}
......
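The new HPET registration hands clocksource_register_hz() a frequency instead of a precomputed mult/shift; the comment block above derives it from hpet_period (femtoseconds per cycle). A user-space sketch of that conversion, using a typical 14.318 MHz HPET period as an assumed example value (not a number taken from this diff):

```c
/* Sketch of the fsec-per-cycle -> Hz conversion done above. */
#include <stdio.h>
#include <stdint.h>

#define FSEC_PER_NSEC 1000000ULL
#define NSEC_PER_SEC  1000000000ULL

int main(void)
{
	uint64_t hpet_period = 69841279;                    /* fs per cycle, assumed */
	uint64_t hpet_freq = FSEC_PER_NSEC * NSEC_PER_SEC;  /* 1e15 fs per second */

	hpet_freq /= hpet_period;                           /* stands in for do_div() */
	printf("hpet_freq = %llu Hz (~14.318 MHz)\n",
	       (unsigned long long)hpet_freq);
	return 0;
}
```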
......@@ -751,7 +751,6 @@ static struct clocksource clocksource_tsc = {
.read = read_tsc,
.resume = resume_tsc,
.mask = CLOCKSOURCE_MASK(64),
.shift = 22,
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
CLOCK_SOURCE_MUST_VERIFY,
#ifdef CONFIG_X86_64
......@@ -845,8 +844,6 @@ __cpuinit int unsynchronized_tsc(void)
static void __init init_tsc_clocksource(void)
{
clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
clocksource_tsc.shift);
if (tsc_clocksource_reliable)
clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
/* lower the rating if we already know its unstable: */
......@@ -854,7 +851,7 @@ static void __init init_tsc_clocksource(void)
clocksource_tsc.rating = 0;
clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
}
clocksource_register(&clocksource_tsc);
clocksource_register_khz(&clocksource_tsc, tsc_khz);
}
#ifdef CONFIG_X86_64
......
......@@ -73,8 +73,8 @@ void update_vsyscall_tz(void)
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
u32 mult)
void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
struct clocksource *clock, u32 mult)
{
unsigned long flags;
......@@ -87,7 +87,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
vsyscall_gtod_data.clock.shift = clock->shift;
vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
vsyscall_gtod_data.wall_to_monotonic = *wtm;
vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
......@@ -169,13 +169,18 @@ int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
* unlikely */
time_t __vsyscall(1) vtime(time_t *t)
{
struct timeval tv;
unsigned seq;
time_t result;
if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
return time_syscall(t);
vgettimeofday(&tv, NULL);
result = tv.tv_sec;
do {
seq = read_seqbegin(&__vsyscall_gtod_data.lock);
result = __vsyscall_gtod_data.wall_time_sec;
} while (read_seqretry(&__vsyscall_gtod_data.lock, seq));
if (t)
*t = result;
return result;
......
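The vtime() fix reads wall_time_sec directly under the vsyscall gtod seqlock instead of deriving it from vgettimeofday(), so the returned second stays consistent with what the timekeeping core last published. A minimal user-space sketch of that lockless read pattern; the type and field names are stand-ins and memory-ordering details are simplified:

```c
/* Sketch of a seqlock-style reader; not the kernel's implementation. */
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

struct gtod_data {
	atomic_uint seq;        /* even = stable, odd = writer in progress */
	time_t wall_time_sec;
};

static time_t read_wall_sec(struct gtod_data *g)
{
	unsigned int seq;
	time_t sec;

	do {
		/* wait out a writer, then snapshot the data */
		while ((seq = atomic_load(&g->seq)) & 1)
			;
		sec = g->wall_time_sec;
		/* retry if a writer ran while we were reading */
	} while (atomic_load(&g->seq) != seq);

	return sec;
}

int main(void)
{
	struct gtod_data g = { .seq = 0, .wall_time_sec = 1280000000 };

	printf("sec = %ld\n", (long)read_wall_sec(&g));
	return 0;
}
```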
......@@ -48,9 +48,6 @@ config HZ
int
default 100
config GENERIC_TIME
def_bool y
source "init/Kconfig"
source "kernel/Kconfig.freezer"
......
......@@ -101,7 +101,9 @@ obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
obj-$(CONFIG_ARCH_SHMOBILE) += sh/
obj-$(CONFIG_GENERIC_TIME) += clocksource/
ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
obj-y += clocksource/
endif
obj-$(CONFIG_DMA_ENGINE) += dma/
obj-$(CONFIG_DCA) += dca/
obj-$(CONFIG_HID) += hid/
......
......@@ -77,7 +77,7 @@ static void power_saving_mwait_init(void)
power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
(highest_subcstate - 1);
#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
#if defined(CONFIG_X86)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
case X86_VENDOR_INTEL:
......
......@@ -264,7 +264,7 @@ int acpi_processor_resume(struct acpi_device * device)
return 0;
}
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
switch (boot_cpu_data.x86_vendor) {
......
......@@ -68,10 +68,7 @@ static struct clocksource clocksource_acpi_pm = {
.rating = 200,
.read = acpi_pm_read,
.mask = (cycle_t)ACPI_PM_MASK,
.mult = 0, /*to be calculated*/
.shift = 22,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
......@@ -190,9 +187,6 @@ static int __init init_acpi_pm_clocksource(void)
if (!pmtmr_ioport)
return -ENODEV;
clocksource_acpi_pm.mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC,
clocksource_acpi_pm.shift);
/* "verify" this timing source: */
for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
udelay(100 * j);
......@@ -220,7 +214,8 @@ static int __init init_acpi_pm_clocksource(void)
if (verify_pmtmr_rate() != 0)
return -ENODEV;
return clocksource_register(&clocksource_acpi_pm);
return clocksource_register_hz(&clocksource_acpi_pm,
PMTMR_TICKS_PER_SEC);
}
/* We use fs_initcall because we want the PCI fixups to have run
......
......@@ -72,7 +72,7 @@ config ATMEL_TCLIB
config ATMEL_TCB_CLKSRC
bool "TC Block Clocksource"
depends on ATMEL_TCLIB && GENERIC_TIME
depends on ATMEL_TCLIB
default y
help
Select this to get a high precision clocksource based on a
......@@ -240,7 +240,7 @@ config CS5535_MFGPT_DEFAULT_IRQ
config CS5535_CLOCK_EVENT_SRC
tristate "CS5535/CS5536 high-res timer (MFGPT) events"
depends on GENERIC_TIME && GENERIC_CLOCKEVENTS && CS5535_MFGPT
depends on GENERIC_CLOCKEVENTS && CS5535_MFGPT
help
This driver provides a clock event source based on the MFGPT
timer(s) in the CS5535 and CS5536 companion chips.
......
......@@ -292,6 +292,8 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
*/
extern int
__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
extern void
__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq);
static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
......@@ -303,6 +305,15 @@ static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
return __clocksource_register_scale(cs, 1000, khz);
}
static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz)
{
__clocksource_updatefreq_scale(cs, 1, hz);
}
static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
{
__clocksource_updatefreq_scale(cs, 1000, khz);
}
static inline void
clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
......@@ -313,11 +324,13 @@ clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void
update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult);
update_vsyscall(struct timespec *ts, struct timespec *wtm,
struct clocksource *c, u32 mult);
extern void update_vsyscall_tz(void);
#else
static inline void
update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult)
update_vsyscall(struct timespec *ts, struct timespec *wtm,
struct clocksource *c, u32 mult)
{
}
......
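With the helpers above, a driver no longer computes mult/shift by hand: it registers with a frequency and, if the rate can change, refreshes it from its enable() hook via __clocksource_updatefreq_hz/khz(). A rough sketch; the "example" clocksource, its read callback and the 24 MHz rate are made up for illustration, only the register/updatefreq helpers come from this series:

```c
#include <linux/clocksource.h>
#include <linux/init.h>

static cycle_t example_read(struct clocksource *cs)
{
	return 0;		/* a real driver would read its counter here */
}

static int example_enable(struct clocksource *cs)
{
	/* rate re-measured at enable time; 24 MHz is a made-up figure */
	__clocksource_updatefreq_hz(cs, 24000000);
	return 0;
}

static struct clocksource example_cs = {
	.name	= "example",
	.rating	= 200,
	.read	= example_read,
	.enable	= example_enable,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_cs_init(void)
{
	/* mult/shift are derived from the frequency for us */
	return clocksource_register_hz(&example_cs, 24000000);
}
device_initcall(example_cs_init);
```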
......@@ -76,9 +76,25 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon,
const unsigned int min, const unsigned int sec);
extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
/*
* timespec_add_safe assumes both values are positive and checks
* for overflow. It will return TIME_T_MAX if the reutrn would be
* smaller then either of the arguments.
*/
extern struct timespec timespec_add_safe(const struct timespec lhs,
const struct timespec rhs);
static inline struct timespec timespec_add(struct timespec lhs,
struct timespec rhs)
{
struct timespec ts_delta;
set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec,
lhs.tv_nsec + rhs.tv_nsec);
return ts_delta;
}
/*
* sub = lhs - rhs, in normalized form
*/
......@@ -97,8 +113,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
#define timespec_valid(ts) \
(((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC))
extern struct timespec xtime;
extern struct timespec wall_to_monotonic;
extern seqlock_t xtime_lock;
extern void read_persistent_clock(struct timespec *ts);
......@@ -110,7 +124,8 @@ extern int timekeeping_suspended;
unsigned long get_seconds(void);
struct timespec current_kernel_time(void);
struct timespec __current_kernel_time(void); /* does not hold xtime_lock */
struct timespec __current_kernel_time(void); /* does not take xtime_lock */
struct timespec __get_wall_to_monotonic(void); /* does not take xtime_lock */
struct timespec get_monotonic_coarse(void);
#define CURRENT_TIME (current_kernel_time())
......
......@@ -2548,6 +2548,7 @@ static void kdb_sysinfo(struct sysinfo *val)
*/
static int kdb_summary(int argc, const char **argv)
{
struct timespec now;
struct kdb_tm tm;
struct sysinfo val;
......@@ -2562,7 +2563,8 @@ static int kdb_summary(int argc, const char **argv)
kdb_printf("domainname %s\n", init_uts_ns.name.domainname);
kdb_printf("ccversion %s\n", __stringify(CCVERSION));
kdb_gmtime(&xtime, &tm);
now = __current_kernel_time();
kdb_gmtime(&now, &tm);
kdb_printf("date %04d-%02d-%02d %02d:%02d:%02d "
"tz_minuteswest %d\n",
1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday,
......
......@@ -90,7 +90,7 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
do {
seq = read_seqbegin(&xtime_lock);
xts = __current_kernel_time();
tom = wall_to_monotonic;
tom = __get_wall_to_monotonic();
} while (read_seqretry(&xtime_lock, seq));
xtim = timespec_to_ktime(xts);
......@@ -608,7 +608,7 @@ static int hrtimer_reprogram(struct hrtimer *timer,
static void retrigger_next_event(void *arg)
{
struct hrtimer_cpu_base *base;
struct timespec realtime_offset;
struct timespec realtime_offset, wtm;
unsigned long seq;
if (!hrtimer_hres_active())
......@@ -616,10 +616,9 @@ static void retrigger_next_event(void *arg)
do {
seq = read_seqbegin(&xtime_lock);
set_normalized_timespec(&realtime_offset,
-wall_to_monotonic.tv_sec,
-wall_to_monotonic.tv_nsec);
wtm = __get_wall_to_monotonic();
} while (read_seqretry(&xtime_lock, seq));
set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
base = &__get_cpu_var(hrtimer_bases);
......
......@@ -300,22 +300,6 @@ struct timespec timespec_trunc(struct timespec t, unsigned gran)
}
EXPORT_SYMBOL(timespec_trunc);
#ifndef CONFIG_GENERIC_TIME
/*
* Simulate gettimeofday using do_gettimeofday which only allows a timeval
* and therefore only yields usec accuracy
*/
void getnstimeofday(struct timespec *tv)
{
struct timeval x;
do_gettimeofday(&x);
tv->tv_sec = x.tv_sec;
tv->tv_nsec = x.tv_usec * NSEC_PER_USEC;
}
EXPORT_SYMBOL_GPL(getnstimeofday);
#endif
/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
* Assumes input in normal date format, i.e. 1980-12-31 23:59:59
* => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
......
......@@ -6,7 +6,7 @@ config TICK_ONESHOT
config NO_HZ
bool "Tickless System (Dynamic Ticks)"
depends on GENERIC_TIME && GENERIC_CLOCKEVENTS
depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
select TICK_ONESHOT
help
This option enables a tickless system: timer interrupts will
......@@ -15,7 +15,7 @@ config NO_HZ
config HIGH_RES_TIMERS
bool "High Resolution Timer Support"
depends on GENERIC_TIME && GENERIC_CLOCKEVENTS
depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
select TICK_ONESHOT
help
This option enables high resolution timer support. If your
......
......@@ -531,7 +531,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
return max_nsecs - (max_nsecs >> 5);
}
#ifdef CONFIG_GENERIC_TIME
#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
/**
* clocksource_select - Select the best clocksource available
......@@ -577,7 +577,7 @@ static void clocksource_select(void)
}
}
#else /* CONFIG_GENERIC_TIME */
#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
static inline void clocksource_select(void) { }
......@@ -639,19 +639,18 @@ static void clocksource_enqueue(struct clocksource *cs)
#define MAX_UPDATE_LENGTH 5 /* Seconds */
/**
* __clocksource_register_scale - Used to install new clocksources
* __clocksource_updatefreq_scale - Used update clocksource with new freq
* @t: clocksource to be registered
* @scale: Scale factor multiplied against freq to get clocksource hz
* @freq: clocksource frequency (cycles per second) divided by scale
*
* Returns -EBUSY if registration fails, zero otherwise.
* This should only be called from the clocksource->enable() method.
*
* This *SHOULD NOT* be called directly! Please use the
* clocksource_register_hz() or clocksource_register_khz helper functions.
* clocksource_updatefreq_hz() or clocksource_updatefreq_khz helper functions.
*/
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
/*
* Ideally we want to use some of the limits used in
* clocksource_max_deferment, to provide a more informed
......@@ -662,7 +661,27 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
NSEC_PER_SEC/scale,
MAX_UPDATE_LENGTH*scale);
cs->max_idle_ns = clocksource_max_deferment(cs);
}
EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
/**
* __clocksource_register_scale - Used to install new clocksources
* @t: clocksource to be registered
* @scale: Scale factor multiplied against freq to get clocksource hz
* @freq: clocksource frequency (cycles per second) divided by scale
*
* Returns -EBUSY if registration fails, zero otherwise.
*
* This *SHOULD NOT* be called directly! Please use the
* clocksource_register_hz() or clocksource_register_khz helper functions.
*/
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
/* Intialize mult/shift and max_idle_ns */
__clocksource_updatefreq_scale(cs, scale, freq);
/* Add clocksource to the clcoksource list */
mutex_lock(&clocksource_mutex);
clocksource_enqueue(cs);
clocksource_select();
......
......@@ -153,8 +153,8 @@ __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
* - wall_to_monotonic is no longer the boot time, getboottime must be
* used instead.
*/
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static struct timespec xtime __attribute__ ((aligned (16)));
static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static struct timespec total_sleep_time;
/*
......@@ -170,11 +170,10 @@ void timekeeping_leap_insert(int leapsecond)
{
xtime.tv_sec += leapsecond;
wall_to_monotonic.tv_sec -= leapsecond;
update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
timekeeper.mult);
}
#ifdef CONFIG_GENERIC_TIME
/**
* timekeeping_forward_now - update clock to the current time
*
......@@ -328,7 +327,8 @@ int do_settimeofday(struct timespec *tv)
timekeeper.ntp_error = 0;
ntp_clear();
update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
timekeeper.mult);
write_sequnlock_irqrestore(&xtime_lock, flags);
......@@ -376,52 +376,6 @@ void timekeeping_notify(struct clocksource *clock)
tick_clock_notify();
}
#else /* GENERIC_TIME */
static inline void timekeeping_forward_now(void) { }
/**
* ktime_get - get the monotonic time in ktime_t format
*
* returns the time in ktime_t format
*/
ktime_t ktime_get(void)
{
struct timespec now;
ktime_get_ts(&now);
return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);
/**
* ktime_get_ts - get the monotonic clock in timespec format
* @ts: pointer to timespec variable
*
* The function calculates the monotonic clock from the realtime
* clock and the wall_to_monotonic offset and stores the result
* in normalized timespec format in the variable pointed to by @ts.
*/
void ktime_get_ts(struct timespec *ts)
{
struct timespec tomono;
unsigned long seq;
do {
seq = read_seqbegin(&xtime_lock);
getnstimeofday(ts);
tomono = wall_to_monotonic;
} while (read_seqretry(&xtime_lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
#endif /* !GENERIC_TIME */
/**
* ktime_get_real - get the real (wall-) time in ktime_t format
*
......@@ -579,9 +533,9 @@ static int timekeeping_resume(struct sys_device *dev)
if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
ts = timespec_sub(ts, timekeeping_suspend_time);
xtime = timespec_add_safe(xtime, ts);
xtime = timespec_add(xtime, ts);
wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
total_sleep_time = timespec_add_safe(total_sleep_time, ts);
total_sleep_time = timespec_add(total_sleep_time, ts);
}
/* re-base the last cycle value */
timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
......@@ -784,10 +738,11 @@ void update_wall_time(void)
return;
clock = timekeeper.clock;
#ifdef CONFIG_GENERIC_TIME
offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#else
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
offset = timekeeper.cycle_interval;
#else
offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
......@@ -856,7 +811,8 @@ void update_wall_time(void)
}
/* check to see if there is a new clocksource to use */
update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
timekeeper.mult);
}
/**
......@@ -887,7 +843,7 @@ EXPORT_SYMBOL_GPL(getboottime);
*/
void monotonic_to_bootbased(struct timespec *ts)
{
*ts = timespec_add_safe(*ts, total_sleep_time);
*ts = timespec_add(*ts, total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
......@@ -902,6 +858,11 @@ struct timespec __current_kernel_time(void)
return xtime;
}
struct timespec __get_wall_to_monotonic(void)
{
return wall_to_monotonic;
}
struct timespec current_kernel_time(void)
{
struct timespec now;
......
......@@ -153,7 +153,7 @@ config IRQSOFF_TRACER
bool "Interrupts-off Latency Tracer"
default n
depends on TRACE_IRQFLAGS_SUPPORT
depends on GENERIC_TIME
depends on !ARCH_USES_GETTIMEOFFSET
select TRACE_IRQFLAGS
select GENERIC_TRACER
select TRACER_MAX_TRACE
......@@ -175,7 +175,7 @@ config IRQSOFF_TRACER
config PREEMPT_TRACER
bool "Preemption-off Latency Tracer"
default n
depends on GENERIC_TIME
depends on !ARCH_USES_GETTIMEOFFSET
depends on PREEMPT
select GENERIC_TRACER
select TRACER_MAX_TRACE
......