Commit 3b16cf87 authored by Jens Axboe

x86: convert to generic helpers for IPI function calls

This converts x86, x86-64, and xen to use the new helpers for
smp_call_function() and friends, and adds support for
smp_call_function_single().
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Parent 3d442233
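For context, the generic helpers split the work like this: kernel/smp.c (introduced by the parent commit) owns the per-CPU call queues, the locking, and the wait logic, while each architecture only has to raise the two IPIs and bounce its IPI handlers into the generic entry points. A minimal sketch of that contract follows; it is not taken from the commit, and my_arch_send_ipi(), the MY_* vector numbers, and the handler names are made-up placeholders, while generic_smp_call_function_interrupt(), generic_smp_call_function_single_interrupt(), irq_enter() and irq_exit() are the real kernel APIs:

/*
 * Sketch of what an architecture provides once it selects
 * USE_GENERIC_SMP_HELPERS.  Placeholder names are marked as such.
 */
#include <linux/smp.h>
#include <linux/hardirq.h>
#include <linux/cpumask.h>

#define MY_CALL_FUNCTION_IPI            1       /* placeholder vector numbers */
#define MY_CALL_FUNCTION_SINGLE_IPI     2

extern void my_arch_send_ipi(cpumask_t mask, int vector);       /* placeholder */

/* kernel/smp.c calls these to kick the target CPU(s) */
void arch_send_call_function_ipi(cpumask_t mask)
{
        my_arch_send_ipi(mask, MY_CALL_FUNCTION_IPI);
}

void arch_send_call_function_single_ipi(int cpu)
{
        my_arch_send_ipi(cpumask_of_cpu(cpu), MY_CALL_FUNCTION_SINGLE_IPI);
}

/* The arch IPI handlers no longer keep their own call_data bookkeeping;
 * they just enter interrupt context and let the generic code run the
 * queued functions for this CPU. */
void my_arch_call_function_ipi_handler(void)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        irq_exit();
}

void my_arch_call_function_single_ipi_handler(void)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        irq_exit();
}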
@@ -168,6 +168,7 @@ config GENERIC_PENDING_IRQ
 config X86_SMP
        bool
        depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
+       select USE_GENERIC_SMP_HELPERS
        default y

 config X86_32_SMP
@@ -1358,6 +1358,10 @@ void __init smp_intr_init(void)
        /* IPI for generic function call */
        set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+
+       /* IPI for single call function */
+       set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+                     call_function_single_interrupt);
 }
 #endif
@@ -711,6 +711,9 @@ END(invalidate_interrupt\num)
 ENTRY(call_function_interrupt)
        apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
 END(call_function_interrupt)
+ENTRY(call_function_single_interrupt)
+       apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
+END(call_function_single_interrupt)
 ENTRY(irq_move_cleanup_interrupt)
        apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
 END(irq_move_cleanup_interrupt)
@@ -494,6 +494,10 @@ void __init native_init_IRQ(void)
        /* IPI for generic function call */
        set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

+       /* IPI for generic single function call */
+       set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
+                     call_function_single_interrupt);
+
        /* Low priority IPI to cleanup after moving an irq */
        set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
 #endif
@@ -121,132 +121,23 @@ static void native_smp_send_reschedule(int cpu)
        send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
 }

-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-       void (*func) (void *info);
-       void *info;
-       atomic_t started;
-       atomic_t finished;
-       int wait;
-};
-
-void lock_ipi_call_lock(void)
+void native_send_call_func_single_ipi(int cpu)
 {
-       spin_lock_irq(&call_lock);
-}
-
-void unlock_ipi_call_lock(void)
-{
-       spin_unlock_irq(&call_lock);
-}
-
-static struct call_data_struct *call_data;
-
-static void __smp_call_function(void (*func) (void *info), void *info,
-                               int nonatomic, int wait)
-{
-       struct call_data_struct data;
-       int cpus = num_online_cpus() - 1;
-
-       if (!cpus)
-               return;
-
-       data.func = func;
-       data.info = info;
-       atomic_set(&data.started, 0);
-       data.wait = wait;
-       if (wait)
-               atomic_set(&data.finished, 0);
-
-       call_data = &data;
-       mb();
-
-       /* Send a message to all other CPUs and wait for them to respond */
-       send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-
-       /* Wait for response */
-       while (atomic_read(&data.started) != cpus)
-               cpu_relax();
-
-       if (wait)
-               while (atomic_read(&data.finished) != cpus)
-                       cpu_relax();
+       send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }

-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.  Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int
-native_smp_call_function_mask(cpumask_t mask,
-                             void (*func)(void *), void *info,
-                             int wait)
+void native_send_call_func_ipi(cpumask_t mask)
 {
-       struct call_data_struct data;
        cpumask_t allbutself;
-       int cpus;
-
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON(irqs_disabled());
-
-       /* Holding any lock stops cpus from going down. */
-       spin_lock(&call_lock);

        allbutself = cpu_online_map;
        cpu_clear(smp_processor_id(), allbutself);

-       cpus_and(mask, mask, allbutself);
-       cpus = cpus_weight(mask);
-
-       if (!cpus) {
-               spin_unlock(&call_lock);
-               return 0;
-       }
-
-       data.func = func;
-       data.info = info;
-       atomic_set(&data.started, 0);
-       data.wait = wait;
-       if (wait)
-               atomic_set(&data.finished, 0);
-
-       call_data = &data;
-       wmb();
-
-       /* Send a message to other CPUs */
        if (cpus_equal(mask, allbutself) &&
            cpus_equal(cpu_online_map, cpu_callout_map))
                send_IPI_allbutself(CALL_FUNCTION_VECTOR);
        else
                send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
-       /* Wait for response */
-       while (atomic_read(&data.started) != cpus)
-               cpu_relax();
-
-       if (wait)
-               while (atomic_read(&data.finished) != cpus)
-                       cpu_relax();
-       spin_unlock(&call_lock);
-
-       return 0;
 }

 static void stop_this_cpu(void *dummy)
@@ -268,18 +159,13 @@ static void stop_this_cpu(void *dummy)
 static void native_smp_send_stop(void)
 {
-       int nolock;
        unsigned long flags;

        if (reboot_force)
                return;

-       /* Don't deadlock on the call lock in panic */
-       nolock = !spin_trylock(&call_lock);
+       smp_call_function(stop_this_cpu, NULL, 0, 0);
        local_irq_save(flags);
-       __smp_call_function(stop_this_cpu, NULL, 0, 0);
-       if (!nolock)
-               spin_unlock(&call_lock);
        disable_local_APIC();
        local_irq_restore(flags);
 }
@@ -301,33 +187,28 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 void smp_call_function_interrupt(struct pt_regs *regs)
 {
-       void (*func) (void *info) = call_data->func;
-       void *info = call_data->info;
-       int wait = call_data->wait;
-
        ack_APIC_irq();
-       /*
-        * Notify initiating CPU that I've grabbed the data and am
-        * about to execute the function
-        */
-       mb();
-       atomic_inc(&call_data->started);
-       /*
-        * At this point the info structure may be out of scope unless wait==1
-        */
        irq_enter();
-       (*func)(info);
+       generic_smp_call_function_interrupt();
 #ifdef CONFIG_X86_32
        __get_cpu_var(irq_stat).irq_call_count++;
 #else
        add_pda(irq_call_count, 1);
 #endif
        irq_exit();
+}

-       if (wait) {
-               mb();
-               atomic_inc(&call_data->finished);
-       }
+void smp_call_function_single_interrupt(void)
+{
+       ack_APIC_irq();
+       irq_enter();
+       generic_smp_call_function_single_interrupt();
+#ifdef CONFIG_X86_32
+       __get_cpu_var(irq_stat).irq_call_count++;
+#else
+       add_pda(irq_call_count, 1);
+#endif
+       irq_exit();
 }

 struct smp_ops smp_ops = {
@@ -338,7 +219,8 @@ struct smp_ops smp_ops = {
        .smp_send_stop = native_smp_send_stop,
        .smp_send_reschedule = native_smp_send_reschedule,
-       .smp_call_function_mask = native_smp_call_function_mask,
+       .send_call_func_ipi = native_send_call_func_ipi,
+       .send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
 EXPORT_SYMBOL_GPL(smp_ops);
@@ -345,7 +345,7 @@ static void __cpuinit start_secondary(void *unused)
         * lock helps us to not include this cpu in a currently in progress
         * smp_call_function().
         */
-       lock_ipi_call_lock();
+       ipi_call_lock_irq();
 #ifdef CONFIG_X86_64
        spin_lock(&vector_lock);
@@ -357,7 +357,7 @@ static void __cpuinit start_secondary(void *unused)
        spin_unlock(&vector_lock);
 #endif
        cpu_set(smp_processor_id(), cpu_online_map);
-       unlock_ipi_call_lock();
+       ipi_call_unlock_irq();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

        setup_secondary_clock();
@@ -25,59 +25,3 @@ __cpuinit void init_gdt(int cpu)
        per_cpu(cpu_number, cpu) = cpu;
 }
 #endif
-
-/**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-                     int wait)
-{
-       return smp_call_function_mask(cpu_online_map, func, info, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-/**
- * smp_call_function_single - Run a function on a specific CPU
- * @cpu: The target CPU.  Cannot be the calling CPU.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @nonatomic: Unused.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-                            int nonatomic, int wait)
-{
-       /* prevent preemption and reschedule on another processor */
-       int ret;
-       int me = get_cpu();
-
-       if (cpu == me) {
-               local_irq_disable();
-               func(info);
-               local_irq_enable();
-               put_cpu();
-               return 0;
-       }
-
-       ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
-
-       put_cpu();
-       return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
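These arch-local copies can go because the generic kernel/smp.c now exports smp_call_function() and smp_call_function_single() for every architecture that selects USE_GENERIC_SMP_HELPERS, with the same caller-visible signature. A small usage sketch for the era of this commit (do_flush() and the CPU number are made up, and the target CPU is assumed to be online; the nonatomic argument is unused):

#include <linux/smp.h>

static void do_flush(void *info)
{
        /* runs on the target CPU with interrupts disabled; must not sleep */
        (void)info;
}

static int kick_cpu2(void)
{
        /* wait=1: return only after do_flush() has finished on CPU 2 */
        return smp_call_function_single(2, do_flush, NULL, 0, 1);
}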
@@ -955,94 +955,24 @@ static void smp_stop_cpu_function(void *dummy)
        halt();
 }

-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-       void (*func) (void *info);
-       void *info;
-       volatile unsigned long started;
-       volatile unsigned long finished;
-       int wait;
-};
-
-static struct call_data_struct *call_data;
-
 /* execute a thread on a new CPU.  The function to be called must be
  * previously set up.  This is used to schedule a function for
  * execution on all CPUs - set up the function then broadcast a
  * function_interrupt CPI to come here on each CPU */
 static void smp_call_function_interrupt(void)
 {
-       void (*func) (void *info) = call_data->func;
-       void *info = call_data->info;
-       /* must take copy of wait because call_data may be replaced
-        * unless the function is waiting for us to finish */
-       int wait = call_data->wait;
-       __u8 cpu = smp_processor_id();
-
-       /*
-        * Notify initiating CPU that I've grabbed the data and am
-        * about to execute the function
-        */
-       mb();
-       if (!test_and_clear_bit(cpu, &call_data->started)) {
-               /* If the bit wasn't set, this could be a replay */
-               printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion"
-                      " with no call pending\n", cpu);
-               return;
-       }
-       /*
-        * At this point the info structure may be out of scope unless wait==1
-        */
        irq_enter();
-       (*func) (info);
+       generic_smp_call_function_interrupt();
        __get_cpu_var(irq_stat).irq_call_count++;
        irq_exit();
-       if (wait) {
-               mb();
-               clear_bit(cpu, &call_data->finished);
-       }
 }

-static int
-voyager_smp_call_function_mask(cpumask_t cpumask,
-                              void (*func) (void *info), void *info, int wait)
+static void smp_call_function_single_interrupt(void)
 {
-       struct call_data_struct data;
-       u32 mask = cpus_addr(cpumask)[0];
-
-       mask &= ~(1 << smp_processor_id());
-
-       if (!mask)
-               return 0;
-
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON(irqs_disabled());
-
-       data.func = func;
-       data.info = info;
-       data.started = mask;
-       data.wait = wait;
-       if (wait)
-               data.finished = mask;
-
-       spin_lock(&call_lock);
-       call_data = &data;
-       wmb();
-       /* Send a message to all other CPUs and wait for them to respond */
-       send_CPI(mask, VIC_CALL_FUNCTION_CPI);
-
-       /* Wait for response */
-       while (data.started)
-               barrier();
-
-       if (wait)
-               while (data.finished)
-                       barrier();
-
-       spin_unlock(&call_lock);
-
-       return 0;
+       irq_enter();
+       generic_smp_call_function_single_interrupt();
+       __get_cpu_var(irq_stat).irq_call_count++;
+       irq_exit();
 }

 /* Sorry about the name.  In an APIC based system, the APICs
@@ -1099,6 +1029,12 @@ void smp_qic_call_function_interrupt(struct pt_regs *regs)
        smp_call_function_interrupt();
 }

+void smp_qic_call_function_single_interrupt(struct pt_regs *regs)
+{
+       ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI);
+       smp_call_function_single_interrupt();
+}
+
 void smp_vic_cpi_interrupt(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
@@ -1119,6 +1055,8 @@ void smp_vic_cpi_interrupt(struct pt_regs *regs)
                smp_enable_irq_interrupt();
        if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
                smp_call_function_interrupt();
+       if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu]))
+               smp_call_function_single_interrupt();
        set_irq_regs(old_regs);
 }
@@ -1862,5 +1800,7 @@ struct smp_ops smp_ops = {
        .smp_send_stop = voyager_smp_send_stop,
        .smp_send_reschedule = voyager_smp_send_reschedule,
-       .smp_call_function_mask = voyager_smp_call_function_mask,
+
+       .send_call_func_ipi = native_send_call_func_ipi,
+       .send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
@@ -1108,7 +1108,9 @@ static const struct smp_ops xen_smp_ops __initdata = {
        .smp_send_stop = xen_smp_send_stop,
        .smp_send_reschedule = xen_smp_send_reschedule,
-       .smp_call_function_mask = xen_smp_call_function_mask,
+
+       .send_call_func_ipi = xen_smp_send_call_function_ipi,
+       .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
 };
 #endif /* CONFIG_SMP */
@@ -558,7 +558,7 @@ static void drop_mm_ref(struct mm_struct *mm)
        }

        if (!cpus_empty(mask))
-               xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
+               smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
 }
 #else
 static void drop_mm_ref(struct mm_struct *mm)
@@ -36,27 +36,14 @@
 #include "mmu.h"

 static cpumask_t xen_cpu_initialized_map;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, callfunc_irq) = -1;
-static DEFINE_PER_CPU(int, debug_irq) = -1;
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);

-struct call_data_struct {
-       void (*func) (void *info);
-       void *info;
-       atomic_t started;
-       atomic_t finished;
-       int wait;
-};
+static DEFINE_PER_CPU(int, resched_irq);
+static DEFINE_PER_CPU(int, callfunc_irq);
+static DEFINE_PER_CPU(int, callfuncsingle_irq);
+static DEFINE_PER_CPU(int, debug_irq) = -1;

 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
-
-static struct call_data_struct *call_data;
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

 /*
  * Reschedule call back. Nothing to do,
@@ -122,6 +109,17 @@ static int xen_smp_intr_init(unsigned int cpu)
                goto fail;
        per_cpu(debug_irq, cpu) = rc;

+       callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+       rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
+                                   cpu,
+                                   xen_call_function_single_interrupt,
+                                   IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
+                                   callfunc_name,
+                                   NULL);
+       if (rc < 0)
+               goto fail;
+       per_cpu(callfuncsingle_irq, cpu) = rc;
+
        return 0;

  fail:
@@ -131,6 +129,9 @@ static int xen_smp_intr_init(unsigned int cpu)
                unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
        if (per_cpu(debug_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
+       if (per_cpu(callfuncsingle_irq, cpu) >= 0)
+               unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);

        return rc;
 }
@@ -338,7 +339,6 @@ void xen_smp_send_reschedule(int cpu)
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
 }

-
 static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
 {
        unsigned cpu;
@@ -349,83 +349,42 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
                xen_send_IPI_one(cpu, vector);
 }

+void xen_smp_send_call_function_ipi(cpumask_t mask)
+{
+       int cpu;
+
+       xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
+
+       /* Make sure other vcpus get a chance to run if they need to. */
+       for_each_cpu_mask(cpu, mask) {
+               if (xen_vcpu_stolen(cpu)) {
+                       HYPERVISOR_sched_op(SCHEDOP_yield, 0);
+                       break;
+               }
+       }
+}
+
+void xen_smp_send_call_function_single_ipi(int cpu)
+{
+       xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
+}
+
 static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
-       void (*func) (void *info) = call_data->func;
-       void *info = call_data->info;
-       int wait = call_data->wait;
-
-       /*
-        * Notify initiating CPU that I've grabbed the data and am
-        * about to execute the function
-        */
-       mb();
-       atomic_inc(&call_data->started);
-       /*
-        * At this point the info structure may be out of scope unless wait==1
-        */
        irq_enter();
-       (*func)(info);
+       generic_smp_call_function_interrupt();
        __get_cpu_var(irq_stat).irq_call_count++;
        irq_exit();

-       if (wait) {
-               mb();           /* commit everything before setting finished */
-               atomic_inc(&call_data->finished);
-       }
-
        return IRQ_HANDLED;
 }

-int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-                              void *info, int wait)
+static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
-       struct call_data_struct data;
-       int cpus, cpu;
-       bool yield;
-
-       /* Holding any lock stops cpus from going down. */
-       spin_lock(&call_lock);
-
-       cpu_clear(smp_processor_id(), mask);
-
-       cpus = cpus_weight(mask);
-       if (!cpus) {
-               spin_unlock(&call_lock);
-               return 0;
-       }
-
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON(irqs_disabled());
-
-       data.func = func;
-       data.info = info;
-       atomic_set(&data.started, 0);
-       data.wait = wait;
-       if (wait)
-               atomic_set(&data.finished, 0);
-
-       call_data = &data;
-       mb();                   /* write everything before IPI */
-
-       /* Send a message to other CPUs and wait for them to respond */
-       xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
-
-       /* Make sure other vcpus get a chance to run if they need to. */
-       yield = false;
-       for_each_cpu_mask(cpu, mask)
-               if (xen_vcpu_stolen(cpu))
-                       yield = true;
-
-       if (yield)
-               HYPERVISOR_sched_op(SCHEDOP_yield, 0);
-
-       /* Wait for response */
-       while (atomic_read(&data.started) != cpus ||
-              (wait && atomic_read(&data.finished) != cpus))
-               cpu_relax();
-
-       spin_unlock(&call_lock);
+       irq_enter();
+       generic_smp_call_function_single_interrupt();
+       __get_cpu_var(irq_stat).irq_call_count++;
+       irq_exit();

-       return 0;
+       return IRQ_HANDLED;
 }
@@ -46,13 +46,8 @@ void xen_smp_cpus_done(unsigned int max_cpus);
 void xen_smp_send_stop(void);
 void xen_smp_send_reschedule(int cpu);
-int xen_smp_call_function (void (*func) (void *info), void *info, int nonatomic,
-                          int wait);
-int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-                                int nonatomic, int wait);
-int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-                              void *info, int wait);
+void xen_smp_send_call_function_ipi(cpumask_t mask);
+void xen_smp_send_call_function_single_ipi(int cpu);

 /* Declare an asm function, along with symbols needed to make it
@@ -32,6 +32,7 @@ extern void (*const interrupt[NR_IRQS])(void);
 void reschedule_interrupt(void);
 void invalidate_interrupt(void);
 void call_function_interrupt(void);
+void call_function_single_interrupt(void);
 #endif

 #ifdef CONFIG_X86_LOCAL_APIC
@@ -68,6 +68,7 @@
 #define ERROR_APIC_VECTOR      0xfe
 #define RESCHEDULE_VECTOR      0xfd
 #define CALL_FUNCTION_VECTOR   0xfc
+#define CALL_FUNCTION_SINGLE_VECTOR    0xfb
 /* fb free - please don't readd KDB here because it's useless
    (hint - think what a NMI bit does to a vector) */
 #define THERMAL_APIC_VECTOR    0xfa
@@ -102,6 +103,7 @@ void spurious_interrupt(void);
 void error_interrupt(void);
 void reschedule_interrupt(void);
 void call_function_interrupt(void);
+void call_function_single_interrupt(void);
 void irq_move_cleanup_interrupt(void);
 void invalidate_interrupt0(void);
 void invalidate_interrupt1(void);
@@ -13,6 +13,7 @@
 BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
+BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
 #endif

 /*
@@ -48,6 +48,7 @@
 #define INVALIDATE_TLB_VECTOR  0xfd
 #define RESCHEDULE_VECTOR      0xfc
 #define CALL_FUNCTION_VECTOR   0xfb
+#define CALL_FUNCTION_SINGLE_VECTOR    0xfa
 #define THERMAL_APIC_VECTOR    0xf0

 /*
@@ -23,4 +23,4 @@ BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
 BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
 BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
 BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
+BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
@@ -33,6 +33,7 @@
 #define VIC_RESCHEDULE_CPI             4
 #define VIC_ENABLE_IRQ_CPI             5
 #define VIC_CALL_FUNCTION_CPI          6
+#define VIC_CALL_FUNCTION_SINGLE_CPI   7

 /* Now the QIC CPIs:  Since we don't need the two initial levels,
  * these are 2 less than the VIC CPIs */
@@ -42,9 +43,10 @@
 #define QIC_RESCHEDULE_CPI             (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
 #define QIC_ENABLE_IRQ_CPI             (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
 #define QIC_CALL_FUNCTION_CPI          (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
+#define QIC_CALL_FUNCTION_SINGLE_CPI   (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)

 #define VIC_START_FAKE_CPI             VIC_TIMER_CPI
-#define VIC_END_FAKE_CPI               VIC_CALL_FUNCTION_CPI
+#define VIC_END_FAKE_CPI               VIC_CALL_FUNCTION_SINGLE_CPI

 /* this is the SYS_INT CPI. */
 #define VIC_SYS_INT                    8
@@ -59,9 +59,9 @@ struct smp_ops {
        void (*smp_send_stop)(void);
        void (*smp_send_reschedule)(int cpu);
-       int (*smp_call_function_mask)(cpumask_t mask,
-                                     void (*func)(void *info), void *info,
-                                     int wait);
+
+       void (*send_call_func_ipi)(cpumask_t mask);
+       void (*send_call_func_single_ipi)(int cpu);
 };

 /* Globals due to paravirt */
@@ -103,17 +103,22 @@ static inline void smp_send_reschedule(int cpu)
        smp_ops.smp_send_reschedule(cpu);
 }

-static inline int smp_call_function_mask(cpumask_t mask,
-                                        void (*func) (void *info), void *info,
-                                        int wait)
+static inline void arch_send_call_function_single_ipi(int cpu)
+{
+       smp_ops.send_call_func_single_ipi(cpu);
+}
+
+static inline void arch_send_call_function_ipi(cpumask_t mask)
 {
-       return smp_ops.smp_call_function_mask(mask, func, info, wait);
+       smp_ops.send_call_func_ipi(mask);
 }

 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
 void native_smp_cpus_done(unsigned int max_cpus);
 int native_cpu_up(unsigned int cpunum);
+void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_single_ipi(int cpu);

 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
@@ -202,7 +207,5 @@ extern void cpu_uninit(void);
 #endif

 extern void smp_alloc_memory(void);
-extern void lock_ipi_call_lock(void);
-extern void unlock_ipi_call_lock(void);
 #endif /* __ASSEMBLY__ */
 #endif
@@ -4,6 +4,7 @@
 enum ipi_vector {
        XEN_RESCHEDULE_VECTOR,
        XEN_CALL_FUNCTION_VECTOR,
+       XEN_CALL_FUNCTION_SINGLE_VECTOR,

        XEN_NR_IPIS,
 };