Commit e74b5680 authored by Paul Mundt

sh: Turn off IRQs around get_timer_offset() calls.

Since all of the sys_timer sources currently do this on their own
within the ->get_offset() path, it's more sensible to just have
the caller take care of it when grabbing xtime_lock. Incidentally,
this is more in line with what others (ie, ARM) are doing already.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Parent bca7c207
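
To make the locking change easier to follow before diving into the diff, here is a condensed, illustrative sketch of the caller-side pattern the patch adopts in do_gettimeofday(). Every identifier comes from the diff itself; only the tail of the function (the usec normalization and the writes into *tv) is elided.

/*
 * Condensed sketch of the new caller-side pattern (not the complete
 * function): the seqlock read section now saves and disables local
 * IRQs itself, so the sys_timer ->get_offset() hook runs with
 * interrupts already off and needs no locking of its own.
 */
void do_gettimeofday(struct timeval *tv)
{
        unsigned long flags;
        unsigned long seq;
        unsigned long usec, sec;

        do {
                /* read_seqbegin() plus local_irq_save() in one step */
                seq = read_seqbegin_irqsave(&xtime_lock, flags);
                usec = get_timer_offset();      /* IRQs are off here */
                sec = xtime.tv_sec;
                usec += xtime.tv_nsec / NSEC_PER_USEC;
        } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

        /* ... fold usec overflow into sec and fill in *tv as before ... */
}

The read_seqbegin_irqsave()/read_seqretry_irqrestore() helpers are the plain read_seqbegin()/read_seqretry() primitives wrapped in local_irq_save()/local_irq_restore(), which is what lets the per-timer spinlocks in the drivers below go away.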
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -50,15 +50,20 @@ unsigned long long __attribute__ ((weak)) sched_clock(void)
 #ifndef CONFIG_GENERIC_TIME
 void do_gettimeofday(struct timeval *tv)
 {
+        unsigned long flags;
         unsigned long seq;
         unsigned long usec, sec;
 
         do {
-                seq = read_seqbegin(&xtime_lock);
+                /*
+                 * Turn off IRQs when grabbing xtime_lock, so that
+                 * the sys_timer get_offset code doesn't have to handle it.
+                 */
+                seq = read_seqbegin_irqsave(&xtime_lock, flags);
                 usec = get_timer_offset();
                 sec = xtime.tv_sec;
-                usec += xtime.tv_nsec / 1000;
-        } while (read_seqretry(&xtime_lock, seq));
+                usec += xtime.tv_nsec / NSEC_PER_USEC;
+        } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 
         while (usec >= 1000000) {
                 usec -= 1000000;
@@ -85,7 +90,7 @@ int do_settimeofday(struct timespec *tv)
          * wall time. Discover what correction gettimeofday() would have
          * made, and then undo it!
          */
-        nsec -= 1000 * get_timer_offset();
+        nsec -= get_timer_offset() * NSEC_PER_USEC;
 
         wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
         wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
--- a/arch/sh/kernel/timers/timer-cmt.c
+++ b/arch/sh/kernel/timers/timer-cmt.c
@@ -11,7 +11,6 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
-#include <linux/spinlock.h>
 #include <linux/seqlock.h>
 #include <asm/timer.h>
 #include <asm/rtc.h>
@@ -46,13 +45,9 @@
 #error "Unknown CPU SUBTYPE"
 #endif
 
-static DEFINE_SPINLOCK(cmt0_lock);
-
 static unsigned long cmt_timer_get_offset(void)
 {
         int count;
-        unsigned long flags;
         static unsigned short count_p = 0xffff; /* for the first call after boot */
         static unsigned long jiffies_p = 0;
@@ -61,7 +56,6 @@ static unsigned long cmt_timer_get_offset(void)
          */
         unsigned long jiffies_t;
 
-        spin_lock_irqsave(&cmt0_lock, flags);
         /* timer count may underflow right here */
         count =  ctrl_inw(CMT_CMCOR_0);
         count -= ctrl_inw(CMT_CMCNT_0);
@@ -88,7 +82,6 @@ static unsigned long cmt_timer_get_offset(void)
         jiffies_p = jiffies_t;
 
         count_p = count;
-        spin_unlock_irqrestore(&cmt0_lock, flags);
 
         count = ((LATCH-1) - count) * TICK_SIZE;
         count = (count + LATCH/2) / LATCH;
@@ -122,7 +115,7 @@ static irqreturn_t cmt_timer_interrupt(int irq, void *dev_id)
 static struct irqaction cmt_irq = {
         .name           = "timer",
         .handler        = cmt_timer_interrupt,
-        .flags          = IRQF_DISABLED,
+        .flags          = IRQF_DISABLED | IRQF_TIMER,
         .mask           = CPU_MASK_NONE,
 };
--- a/arch/sh/kernel/timers/timer-mtu2.c
+++ b/arch/sh/kernel/timers/timer-mtu2.c
@@ -12,7 +12,6 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
-#include <linux/spinlock.h>
 #include <linux/seqlock.h>
 #include <asm/timer.h>
 #include <asm/io.h>
@@ -28,9 +27,6 @@
  * However, we can implement channel cascade if we go the overflow route and
  * get away with using 2 MTU2 channels as a 32-bit timer.
  */
-static DEFINE_SPINLOCK(mtu2_lock);
-
 #define MTU2_TSTR       0xfffe4280
 #define MTU2_TCR_1      0xfffe4380
 #define MTU2_TMDR_1     0xfffe4381
@@ -55,8 +51,6 @@ static DEFINE_SPINLOCK(mtu2_lock);
 static unsigned long mtu2_timer_get_offset(void)
 {
         int count;
-        unsigned long flags;
         static int count_p = 0x7fff;    /* for the first call after boot */
         static unsigned long jiffies_p = 0;
@@ -65,7 +59,6 @@ static unsigned long mtu2_timer_get_offset(void)
          */
         unsigned long jiffies_t;
 
-        spin_lock_irqsave(&mtu2_lock, flags);
         /* timer count may underflow right here */
         count = ctrl_inw(MTU2_TCNT_1);  /* read the latched count */
@@ -90,7 +83,6 @@ static unsigned long mtu2_timer_get_offset(void)
         jiffies_p = jiffies_t;
 
         count_p = count;
-        spin_unlock_irqrestore(&mtu2_lock, flags);
 
         count = ((LATCH-1) - count) * TICK_SIZE;
         count = (count + LATCH/2) / LATCH;
@@ -118,7 +110,7 @@ static irqreturn_t mtu2_timer_interrupt(int irq, void *dev_id)
 static struct irqaction mtu2_irq = {
         .name           = "timer",
         .handler        = mtu2_timer_interrupt,
-        .flags          = IRQF_DISABLED,
+        .flags          = IRQF_DISABLED | IRQF_TIMER,
         .mask           = CPU_MASK_NONE,
 };
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -17,7 +17,6 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
-#include <linux/spinlock.h>
 #include <linux/seqlock.h>
 #include <asm/timer.h>
 #include <asm/rtc.h>
@@ -31,13 +30,9 @@
 #define TMU0_TCR_CALIB  0x0000
 
-static DEFINE_SPINLOCK(tmu0_lock);
-
 static unsigned long tmu_timer_get_offset(void)
 {
         int count;
-        unsigned long flags;
         static int count_p = 0x7fffffff;    /* for the first call after boot */
         static unsigned long jiffies_p = 0;
@@ -46,7 +41,6 @@ static unsigned long tmu_timer_get_offset(void)
          */
         unsigned long jiffies_t;
 
-        spin_lock_irqsave(&tmu0_lock, flags);
         /* timer count may underflow right here */
         count = ctrl_inl(TMU0_TCNT);    /* read the latched count */
@@ -72,7 +66,6 @@ static unsigned long tmu_timer_get_offset(void)
         jiffies_p = jiffies_t;
 
         count_p = count;
-        spin_unlock_irqrestore(&tmu0_lock, flags);
 
         count = ((LATCH-1) - count) * TICK_SIZE;
         count = (count + LATCH/2) / LATCH;
@@ -106,7 +99,7 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
 static struct irqaction tmu_irq = {
         .name           = "timer",
         .handler        = tmu_timer_interrupt,
-        .flags          = IRQF_DISABLED,
+        .flags          = IRQF_DISABLED | IRQF_TIMER,
         .mask           = CPU_MASK_NONE,
 };
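
For the driver side, the sketch below shows roughly what a sys_timer ->get_offset() implementation is left with after these removals, using the TMU variant as the example. It is trimmed for illustration: the jiffies-based underflow/consistency checks and the final return that the real driver keeps are elided or summarized, and it assumes, as the patch does, that the caller has already disabled interrupts around the call.

/*
 * Illustrative, trimmed-down ->get_offset() after the change: no more
 * spin_lock_irqsave()/spin_unlock_irqrestore() pair, because
 * do_gettimeofday() and friends now hold IRQs off around the call.
 */
static unsigned long tmu_timer_get_offset(void)
{
        int count;

        /* timer count may underflow right here */
        count = ctrl_inl(TMU0_TCNT);    /* read the latched count */

        /* ... jiffies-based consistency checks elided ... */

        /* convert the remaining count into microseconds within this tick */
        count = ((LATCH - 1) - count) * TICK_SIZE;
        count = (count + LATCH / 2) / LATCH;

        return count;
}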