Commit a1659d6d, authored by Richard Henderson, committed by Matt Turner

alpha: Switch to GENERIC_CLOCKEVENTS

This allows us to get rid of some hacky SMP code, along with the
cycle-counter hackery that is now handled by generic code via the
clocksource and clock_event_device objects.
Signed-off-by: Richard Henderson <rth@twiddle.net>
Parent db2d3260
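
For context on the clocksource + clock_event_device split: the clocksource is the free-running counter used for timekeeping (on Alpha, the rpcc cycle counter), while the clock_event_device models the interrupt source that drives the periodic tick (here, the RTC). The clocksource half already exists in time.c and is registered with clocksource_register_hz() in the final hunk below; a rough sketch of that pre-existing object, reconstructed from memory of the same-era tree (the exact .rating and .flags values are assumptions, not part of this patch):

/* Sketch of the existing rpcc-based clocksource in time.c; the exact
   field values here are assumptions, not part of this patch. */
static cycle_t read_rpcc(struct clocksource *cs)
{
	return rpcc();			/* 32-bit processor cycle counter */
}

static struct clocksource clocksource_rpcc = {
	.name	= "rpcc",
	.rating	= 300,
	.read	= read_rpcc,
	.mask	= CLOCKSOURCE_MASK(32),	/* rpcc is only 32 bits wide */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

This patch supplies the missing clock_event_device half, shown in the time.c hunks below.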
@@ -16,6 +16,7 @@ config ALPHA
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+	select GENERIC_CLOCKEVENTS
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
@@ -66,21 +66,7 @@ do_entInt(unsigned long type, unsigned long vector,
 		break;
 	case 1:
 		old_regs = set_irq_regs(regs);
-#ifdef CONFIG_SMP
-	  {
-		long cpu;
-
-		smp_percpu_timer_interrupt(regs);
-		cpu = smp_processor_id();
-		if (cpu != boot_cpuid) {
-			kstat_incr_irqs_this_cpu(RTC_IRQ, irq_to_desc(RTC_IRQ));
-		} else {
-			handle_irq(RTC_IRQ);
-		}
-	  }
-#else
 		handle_irq(RTC_IRQ);
-#endif
 		set_irq_regs(old_regs);
 		return;
 	case 2:
@@ -135,13 +135,13 @@ extern void unregister_srm_console(void);
 /* smp.c */
 extern void setup_smp(void);
 extern void handle_ipi(struct pt_regs *);
-extern void smp_percpu_timer_interrupt(struct pt_regs *);
 
 /* bios32.c */
 /* extern void reset_for_srm(void); */
 
 /* time.c */
 extern irqreturn_t timer_interrupt(int irq, void *dev);
+extern void init_clockevent(void);
 extern void common_init_rtc(void);
 extern unsigned long est_cycle_freq;
@@ -138,9 +138,11 @@ smp_callin(void)
 	/* Get our local ticker going. */
 	smp_setup_percpu_timer(cpuid);
+	init_clockevent();
 
 	/* Call platform-specific callin, if specified */
-	if (alpha_mv.smp_callin) alpha_mv.smp_callin();
+	if (alpha_mv.smp_callin)
+		alpha_mv.smp_callin();
 
 	/* All kernel threads share the same mm context. */
 	atomic_inc(&init_mm.mm_count);
@@ -498,35 +500,6 @@ smp_cpus_done(unsigned int max_cpus)
 	       ((bogosum + 2500) / (5000/HZ)) % 100);
 }
 
-void
-smp_percpu_timer_interrupt(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs;
-	int cpu = smp_processor_id();
-	unsigned long user = user_mode(regs);
-	struct cpuinfo_alpha *data = &cpu_data[cpu];
-
-	old_regs = set_irq_regs(regs);
-
-	/* Record kernel PC.  */
-	profile_tick(CPU_PROFILING);
-
-	if (!--data->prof_counter) {
-		/* We need to make like a normal interrupt -- otherwise
-		   timer interrupts ignore the global interrupt lock,
-		   which would be a Bad Thing.  */
-		irq_enter();
-
-		update_process_times(user);
-
-		data->prof_counter = data->prof_multiplier;
-
-		irq_exit();
-	}
-	set_irq_regs(old_regs);
-}
-
 int
 setup_profiling_timer(unsigned int multiplier)
 {
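
The deleted smp_percpu_timer_interrupt() above hand-rolled the per-CPU tick work: profiling plus update_process_times(), throttled by prof_counter. With GENERIC_CLOCKEVENTS, the clockevents core installs its own handler on each per-CPU device and performs the equivalent work. A simplified paraphrase of that generic periodic handler (based on kernel/time/tick-common.c of this era; locking and broadcast details elided, so treat the names as approximate):

/* Approximate paraphrase of the generic periodic tick path that runs
 * via ce->event_handler; this is not code from this patch. */
static void tick_periodic_sketch(int cpu)
{
	if (cpu == tick_do_timer_cpu)	/* one designated CPU...        */
		xtime_update(1);	/* ...advances jiffies/wall time */

	/* Every CPU does its own process accounting and profiling,
	   replacing the function deleted above. */
	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}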
@@ -42,6 +42,7 @@
 #include <linux/time.h>
 #include <linux/timex.h>
 #include <linux/clocksource.h>
+#include <linux/clockchips.h>
 
 #include "proto.h"
 #include "irq_impl.h"
@@ -49,25 +50,6 @@
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
-#define TICK_SIZE (tick_nsec / 1000)
-
-/*
- * Shift amount by which scaled_ticks_per_cycle is scaled.  Shifting
- * by 48 gives us 16 bits for HZ while keeping the accuracy good even
- * for large CPU clock rates.
- */
-#define FIX_SHIFT	48
-
-/* lump static variables together for more efficient access: */
-static struct {
-	/* cycle counter last time it got invoked */
-	__u32 last_time;
-	/* ticks/cycle * 2^48 */
-	unsigned long scaled_ticks_per_cycle;
-	/* partial unused tick */
-	unsigned long partial_tick;
-} state;
-
 unsigned long est_cycle_freq;
 
 #ifdef CONFIG_IRQ_WORK
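
The "cycle counter hackery" being deleted here converted elapsed rpcc cycles into timer ticks with 48-bit fixed-point arithmetic, carrying the sub-tick remainder in partial_tick. A standalone demo of that arithmetic, using the variable names of the deleted code (the 500 MHz / HZ=1024 figures are illustrative, not from the patch; assumes 64-bit long, as on Alpha):

#include <stdio.h>

#define FIX_SHIFT 48	/* ticks/cycle scaled by 2^48 */

int main(void)
{
	unsigned long hz = 1024, cycle_freq = 500000000;
	/* (HZ << 48) / cycle_freq, as the deleted time_init() computed */
	unsigned long scaled_ticks_per_cycle = (hz << FIX_SHIFT) / cycle_freq;
	unsigned long partial_tick = 0, delta;
	unsigned int last_time = 0, now = 488282;  /* just over 1/HZ sec of cycles */

	delta = (now - last_time) * scaled_ticks_per_cycle + partial_tick;
	partial_tick = delta & ((1UL << FIX_SHIFT) - 1);  /* sub-tick remainder */
	printf("nticks = %lu\n", delta >> FIX_SHIFT);     /* prints: nticks = 1 */
	return 0;
}

The generic clockevents/tick layer makes all of this unnecessary: the RTC fires at a fixed period and each interrupt simply advances the tick by one event.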
@@ -96,49 +78,64 @@ static inline __u32 rpcc(void)
 	return __builtin_alpha_rpcc();
 }
 
 /*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
+ * The RTC as a clock_event_device primitive.
  */
-irqreturn_t timer_interrupt(int irq, void *dev)
-{
-	unsigned long delta;
-	__u32 now;
-	long nticks;
 
-#ifndef CONFIG_SMP
-	/* Not SMP, do kernel PC profiling here.  */
-	profile_tick(CPU_PROFILING);
-#endif
+static DEFINE_PER_CPU(struct clock_event_device, cpu_ce);
 
-	/*
-	 * Calculate how many ticks have passed since the last update,
-	 * including any previous partial leftover.  Save any resulting
-	 * fraction for the next pass.
-	 */
-	now = rpcc();
-	delta = now - state.last_time;
-	state.last_time = now;
-	delta = delta * state.scaled_ticks_per_cycle + state.partial_tick;
-	state.partial_tick = delta & ((1UL << FIX_SHIFT) - 1);
-	nticks = delta >> FIX_SHIFT;
+irqreturn_t
+timer_interrupt(int irq, void *dev)
+{
+	int cpu = smp_processor_id();
+	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
 
-	if (nticks)
-		xtime_update(nticks);
+	/* Don't run the hook for UNUSED or SHUTDOWN.  */
+	if (likely(ce->mode == CLOCK_EVT_MODE_PERIODIC))
+		ce->event_handler(ce);
 
 	if (test_irq_work_pending()) {
 		clear_irq_work_pending();
 		irq_work_run();
 	}
 
-#ifndef CONFIG_SMP
-	while (nticks--)
-		update_process_times(user_mode(get_irq_regs()));
-#endif
-
 	return IRQ_HANDLED;
 }
 
+static void
+rtc_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
+{
+	/* The mode member of CE is updated in generic code.
+	   Since we only support periodic events, nothing to do.  */
+}
+
+static int
+rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
+{
+	/* This hook is for oneshot mode, which we don't support.  */
+	return -EINVAL;
+}
+
+void __init
+init_clockevent(void)
+{
+	int cpu = smp_processor_id();
+	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
+
+	*ce = (struct clock_event_device){
+		.name = "rtc",
+		.features = CLOCK_EVT_FEAT_PERIODIC,
+		.rating = 100,
+		.cpumask = cpumask_of(cpu),
+		.set_mode = rtc_ce_set_mode,
+		.set_next_event = rtc_ce_set_next_event,
+	};
+
+	clockevents_config_and_register(ce, CONFIG_HZ, 0, 0);
+}
+
 void __init
 common_init_rtc(void)
 {
@@ -372,22 +369,9 @@ time_init(void)
 	clocksource_register_hz(&clocksource_rpcc, cycle_freq);
 #endif
 
-	/* From John Bowman <bowman@math.ualberta.ca>: allow the values
-	   to settle, as the Update-In-Progress bit going low isn't good
-	   enough on some hardware.  2ms is our guess; we haven't found
-	   bogomips yet, but this is close on a 500Mhz box.  */
-	__delay(1000000);
-
-	if (HZ > (1<<16)) {
-		extern void __you_loose (void);
-		__you_loose();
-	}
-
-	state.last_time = cc1;
-	state.scaled_ticks_per_cycle
-		= ((unsigned long) HZ << FIX_SHIFT) / cycle_freq;
-	state.partial_tick = 0L;
-
 	/* Startup the timer source. */
 	alpha_mv.init_rtc();
+
+	/* Start up the clock event device.  */
+	init_clockevent();
 }
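
A design note on the registration above: since the device advertises only CLOCK_EVT_FEAT_PERIODIC and rejects oneshot programming in rtc_ce_set_next_event(), the frequency and min/max delta arguments to clockevents_config_and_register() should have no effect on programming (they matter only for oneshot mult/shift setup in the generic core of this era), so passing (CONFIG_HZ, 0, 0) suffices. The core then installs the generic periodic tick handler as ce->event_handler, which timer_interrupt() invokes on every RTC interrupt; after boot, the per-CPU "rtc" event devices should be visible in /proc/timer_list.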