Commit 56957940 authored by Vineet Gupta

ARC: opencode arc_request_percpu_irq

- The idea is to remove the API usage since it has a subtle
  design flaw - it relies on being called on cpu0 first. This is true for
  some early per cpu irqs such as TIMER/IPI, but not for late probed
  per cpu peripherals such as perf. And its usage in perf has already
  bitten us once: see c6317bc7
  ("ARCv2: perf: Ensure perf intr gets enabled on all cores") where we
  ended up open coding it anyway; the failing pattern is sketched below
  the commit header.

- The seeming duplication will go away once we start using a cpu notifier
  for timer setup (a rough sketch of that appears at the end of this diff)
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Parent db4c4426
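
For illustration only (not part of this commit): a minimal sketch of the pattern that bit perf, assuming a hypothetical late-probed per-cpu device. MY_PERCPU_IRQ, my_isr and my_dev are made-up names. If this init path first runs on a non-boot CPU, the old helper skips request_percpu_irq() (it only requests on cpu 0) yet still calls enable_percpu_irq(), and genirq rejects enabling an IRQ that was never requested.

    #include <linux/interrupt.h>
    #include <linux/percpu.h>
    #include <linux/smp.h>

    #define MY_PERCPU_IRQ	24		/* made-up IRQ number */

    static DEFINE_PER_CPU(int, my_dev);	/* made-up per-cpu cookie */

    static irqreturn_t my_isr(int irq, void *dev)
    {
    	return IRQ_HANDLED;
    }

    static void my_percpu_device_init(void)
    {
    	int cpu = smp_processor_id();

    	/*
    	 * On cpu != 0 the helper does NOT call request_percpu_irq(),
    	 * but unconditionally calls enable_percpu_irq() - so if a
    	 * late-probed device gets here on a secondary CPU first, the
    	 * enable hits an IRQ with no percpu action installed.
    	 */
    	arc_request_percpu_irq(MY_PERCPU_IRQ, cpu, my_isr,
    			       "my-percpu-dev", this_cpu_ptr(&my_dev));
    }
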
@@ -26,8 +26,5 @@
 extern void arc_init_IRQ(void);
 void arc_local_timer_setup(void);
-void arc_request_percpu_irq(int irq, int cpu,
-			    irqreturn_t (*isr)(int irq, void *dev),
-			    const char *irq_nm, void *percpu_dev);
 #endif
@@ -50,32 +50,3 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	irq_exit();
 	set_irq_regs(old_regs);
 }
-
-/*
- * API called for requesting percpu interrupts - called by each CPU
- * - For boot CPU, actually request the IRQ with genirq core + enables
- * - For subsequent callers only enable called locally
- *
- * Relies on being called by boot cpu first (i.e. request called ahead) of
- * any enable as expected by genirq. Hence Suitable only for TIMER, IPI
- * which are guaranteed to be setup on boot core first.
- * Late probed peripherals such as perf can't use this as there no guarantee
- * of being called on boot CPU first.
- */
-void arc_request_percpu_irq(int irq, int cpu,
-			    irqreturn_t (*isr)(int irq, void *dev),
-			    const char *irq_nm,
-			    void *percpu_dev)
-{
-	/* Boot cpu calls request, all call enable */
-	if (!cpu) {
-		int rc;
-
-		rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
-		if (rc)
-			panic("Percpu IRQ request failed for %d\n", irq);
-	}
-
-	enable_percpu_irq(irq, 0);
-}
@@ -346,6 +346,10 @@ irqreturn_t do_IPI(int irq, void *dev_id)
 /*
  * API called by platform code to hookup arch-common ISR to their IPI IRQ
+ *
+ * Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
+ * function needs to call irq_set_percpu_devid() for IPI IRQ, otherwise
+ * request_percpu_irq() below will fail
  */
 static DEFINE_PER_CPU(int, ipi_dev);
@@ -353,7 +357,16 @@ int smp_ipi_irq_setup(int cpu, int irq)
 {
 	int *dev = per_cpu_ptr(&ipi_dev, cpu);

-	arc_request_percpu_irq(irq, cpu, do_IPI, "IPI Interrupt", dev);
+	/* Boot cpu calls request, all call enable */
+	if (!cpu) {
+		int rc;
+
+		rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
+		if (rc)
+			panic("Percpu IRQ request failed for %d\n", irq);
+	}
+
+	enable_percpu_irq(irq, 0);

 	return 0;
 }
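
The note added above requires the platform intc to mark the IPI line per-cpu in its setup/map path. A rough sketch of such a map callback, assuming a hypothetical platform intc (PLAT_IPI_HWIRQ, plat_intc_chip and plat_intc_map are made-up names, not from this commit):

    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    #define PLAT_IPI_HWIRQ	1		/* made-up hwirq carrying the IPI */

    static struct irq_chip plat_intc_chip;	/* platform irq_chip, details elided */

    static int plat_intc_map(struct irq_domain *d, unsigned int irq,
    			 irq_hw_number_t hw)
    {
    	if (hw == PLAT_IPI_HWIRQ) {
    		/* mark per-cpu so request_percpu_irq() in smp_ipi_irq_setup() succeeds */
    		irq_set_percpu_devid(irq);
    		irq_set_chip_and_handler(irq, &plat_intc_chip,
    					 handle_percpu_devid_irq);
    	} else {
    		irq_set_chip_and_handler(irq, &plat_intc_chip, handle_level_irq);
    	}

    	return 0;
    }
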
@@ -251,14 +251,22 @@ void arc_local_timer_setup()
 {
 	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
 	int cpu = smp_processor_id();
+	int irq = TIMER0_IRQ;

 	evt->cpumask = cpumask_of(cpu);
 	clockevents_config_and_register(evt, arc_get_core_freq(),
 					0, ARC_TIMER_MAX);

 	/* setup the per-cpu timer IRQ handler - for all cpus */
-	arc_request_percpu_irq(TIMER0_IRQ, cpu, timer_irq_handler,
-			       "Timer0 (per-cpu-tick)", evt);
+	if (!cpu) {
+		int rc;
+
+		rc = request_percpu_irq(irq, timer_irq_handler,
+					"Timer0 (per-cpu-tick)", evt);
+		if (rc)
+			panic("Percpu IRQ request failed for TIMER\n");
+	}
+
+	enable_percpu_irq(irq, 0);
 }

 /*
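
The commit message notes that the remaining duplication disappears once timer setup moves to a cpu notifier. That change is not part of this diff; below is only a rough sketch of what it might look like with the cpu notifier API of this kernel era (arc_timer_cpu_notify, arc_timer_cpu_nb and arc_timer_notifier_init are made-up names):

    #include <linux/cpu.h>
    #include <linux/init.h>
    #include <linux/notifier.h>

    static int arc_timer_cpu_notify(struct notifier_block *self,
    				unsigned long action, void *hcpu)
    {
    	/* each CPU sets up its clockevent + per-cpu timer IRQ as it comes online */
    	if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
    		arc_local_timer_setup();

    	return NOTIFY_OK;
    }

    static struct notifier_block arc_timer_cpu_nb = {
    	.notifier_call = arc_timer_cpu_notify,
    };

    static int __init arc_timer_notifier_init(void)
    {
    	/* registered from the boot CPU's time init path (wiring elided) */
    	return register_cpu_notifier(&arc_timer_cpu_nb);
    }
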