/*
 * arch/xtensa/kernel/time.c
 *
 * Timer and clock support.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/profile.h>
#include <linux/delay.h>

#include <asm/timex.h>
#include <asm/platform.h>


extern volatile unsigned long wall_jiffies;

u64 jiffies_64 = INITIAL_JIFFIES;
EXPORT_SYMBOL(jiffies_64);

spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
EXPORT_SYMBOL(rtc_lock);


#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
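/* Set at boot by platform_calibrate_ccount(); when calibration is enabled,
 * the CCOUNT_PER_JIFFY and CCOUNT_NSEC macros used below resolve to these
 * variables.
 */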
unsigned long ccount_per_jiffy;		/* per 1/HZ */
unsigned long ccount_nsec;		/* nsec per ccount increment */
#endif

unsigned int last_ccount_stamp;		/* ccount value at the last timer tick */
static long last_rtc_update = 0;	/* xtime.tv_sec at the last RTC write-back */

/*
 * Scheduler clock - returns current time in nanosec units.
 * Resolution is one jiffy, since the value is derived from the jiffies counter.
 */

unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies * (1000000000 / HZ);
}

static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	SA_INTERRUPT,
	.name =		"timer",
};

void __init time_init(void)
{
	time_t sec_o, sec_n = 0;

	/* The platform must provide a function to calibrate the processor
	 * speed when CONFIG_XTENSA_CALIBRATE_CCOUNT is enabled.
	 */

#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
	printk("Calibrating CPU frequency ");
	platform_calibrate_ccount();
	printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
			(int)(ccount_per_jiffy/(10000/HZ))%100);
#endif

	/* Set time from RTC (if provided). Keep reading until the reported
	 * second changes, so that xtime starts on a second boundary.
	 */

	if (platform_get_rtc_time(&sec_o) == 0)
		while (platform_get_rtc_time(&sec_n) == 0)
			if (sec_o != sec_n)
				break;

	xtime.tv_nsec = 0;
	last_rtc_update = xtime.tv_sec = sec_n;
	last_ccount_stamp = get_ccount();

	set_normalized_timespec(&wall_to_monotonic,
		-xtime.tv_sec, -xtime.tv_nsec);

	/* Initialize the linux timer interrupt. */

	setup_irq(LINUX_TIMER_INT, &timer_irqaction);
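	/* Program the first tick one jiffy from now; set_linux_timer()
	 * writes the CCOMPARE register used for the timer interrupt. */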
	set_linux_timer(get_ccount() + CCOUNT_PER_JIFFY);
}


int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;
	unsigned long ccount;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	/* This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time.  Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	ccount = get_ccount();
	nsec -= (ccount - last_ccount_stamp) * CCOUNT_NSEC;
	nsec -= (jiffies - wall_jiffies) * CCOUNT_PER_JIFFY * CCOUNT_NSEC;

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	ntp_clear();
	write_sequnlock_irq(&xtime_lock);
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);


void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long sec, usec, delta, lost, seq;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);

		delta = get_ccount() - last_ccount_stamp;
		sec = xtime.tv_sec;
		usec = (xtime.tv_nsec / NSEC_PER_USEC);

		lost = jiffies - wall_jiffies;

	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	/* Convert to microseconds: add the full jiffies not yet folded into
	 * xtime and the cycles elapsed since the last timer tick. */
	usec += lost * (1000000UL/HZ) + (delta * CCOUNT_NSEC) / NSEC_PER_USEC;
	for (; usec >= 1000000; sec++, usec -= 1000000)
		;

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);

/*
 * The timer interrupt is called HZ times per second.
 */

irqreturn_t timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{

	unsigned long next;

	next = get_linux_timer();

again:
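	/* Account for every tick period that has elapsed since the last
	 * CCOMPARE update; more than one jiffy may have passed, e.g. if
	 * interrupts were disabled for a long time.
	 */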
	while ((signed long)(get_ccount() - next) > 0) {

		profile_tick(CPU_PROFILING, regs);
#ifndef CONFIG_SMP
		update_process_times(user_mode(regs));
#endif

		write_seqlock(&xtime_lock);

		last_ccount_stamp = next;
		next += CCOUNT_PER_JIFFY;
		do_timer (regs); /* Linux handler in kernel/timer.c */

		/* If the clock is synchronized to NTP, write the time back to
		 * the RTC roughly every 11 minutes (659 s), close to a second
		 * boundary so the RTC ends up with full-second accuracy.
		 */
		if (ntp_synced() &&
		    xtime.tv_sec - last_rtc_update >= 659 &&
		    abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ &&
		    jiffies - wall_jiffies == 1) {

			if (platform_set_rtc_time(xtime.tv_sec+1) == 0)
				last_rtc_update = xtime.tv_sec+1;
			else
				/* Do it again in 60 s */
				last_rtc_update += 60;
		}
		write_sequnlock(&xtime_lock);
	}

	/* NOTE: writing CCOMPAREn clears the interrupt.  */

	set_linux_timer (next);

	/* Make sure we didn't miss any tick... */

	if ((signed long)(get_ccount() - next) > 0)
		goto again;

	/* Allow platform to do something useful (e.g. kick a watchdog). */

	platform_heartbeat();

	return IRQ_HANDLED;
}

#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
void __devinit calibrate_delay(void)
{
	loops_per_jiffy = CCOUNT_PER_JIFFY;
	printk("Calibrating delay loop (skipped)... "
	       "%lu.%02lu BogoMIPS preset\n",
	       loops_per_jiffy/(1000000/HZ),
	       (loops_per_jiffy/(10000/HZ)) % 100);
}
#endif