Commit 0d1edf46 authored by Jeremy Fitzhardinge, committed by Ingo Molnar

xen: compile irq functions without -pg for ftrace

For some reason I managed to miss a bunch of irq-related functions
which also need to be compiled without -pg when using ftrace.  This
patch moves them into their own file, and starts a cleanup process
I've been meaning to do anyway.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: "Alex Nixon (Intern)" <Alex.Nixon@eu.citrix.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent eac4345b
@@ -2,9 +2,10 @@ ifdef CONFIG_FTRACE
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_spinlock.o = -pg
 CFLAGS_REMOVE_time.o = -pg
+CFLAGS_REMOVE_irq.o = -pg
 endif
 
-obj-y := enlighten.o setup.o multicalls.o mmu.o \
+obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
 		time.o xen-asm_$(BITS).o grant-table.o suspend.o
 
 obj-$(CONFIG_SMP) += smp.o spinlock.o
@@ -30,7 +30,6 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/physdev.h>
 #include <xen/interface/vcpu.h>
-#include <xen/interface/sched.h>
 #include <xen/features.h>
 #include <xen/page.h>
 #include <xen/hvc-console.h>
@@ -226,94 +225,6 @@ static unsigned long xen_get_debugreg(int reg)
         return HYPERVISOR_get_debugreg(reg);
 }
 
-static unsigned long xen_save_fl(void)
-{
-        struct vcpu_info *vcpu;
-        unsigned long flags;
-
-        vcpu = x86_read_percpu(xen_vcpu);
-
-        /* flag has opposite sense of mask */
-        flags = !vcpu->evtchn_upcall_mask;
-
-        /* convert to IF type flag
-           -0 -> 0x00000000
-           -1 -> 0xffffffff
-        */
-        return (-flags) & X86_EFLAGS_IF;
-}
-
-static void xen_restore_fl(unsigned long flags)
-{
-        struct vcpu_info *vcpu;
-
-        /* convert from IF type flag */
-        flags = !(flags & X86_EFLAGS_IF);
-
-        /* There's a one instruction preempt window here. We need to
-           make sure we're don't switch CPUs between getting the vcpu
-           pointer and updating the mask. */
-        preempt_disable();
-        vcpu = x86_read_percpu(xen_vcpu);
-        vcpu->evtchn_upcall_mask = flags;
-        preempt_enable_no_resched();
-
-        /* Doesn't matter if we get preempted here, because any
-           pending event will get dealt with anyway. */
-
-        if (flags == 0) {
-                preempt_check_resched();
-                barrier(); /* unmask then check (avoid races) */
-                if (unlikely(vcpu->evtchn_upcall_pending))
-                        force_evtchn_callback();
-        }
-}
-
-static void xen_irq_disable(void)
-{
-        /* There's a one instruction preempt window here. We need to
-           make sure we're don't switch CPUs between getting the vcpu
-           pointer and updating the mask. */
-        preempt_disable();
-        x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
-        preempt_enable_no_resched();
-}
-
-static void xen_irq_enable(void)
-{
-        struct vcpu_info *vcpu;
-
-        /* We don't need to worry about being preempted here, since
-           either a) interrupts are disabled, so no preemption, or b)
-           the caller is confused and is trying to re-enable interrupts
-           on an indeterminate processor. */
-
-        vcpu = x86_read_percpu(xen_vcpu);
-        vcpu->evtchn_upcall_mask = 0;
-
-        /* Doesn't matter if we get preempted here, because any
-           pending event will get dealt with anyway. */
-
-        barrier(); /* unmask then check (avoid races) */
-        if (unlikely(vcpu->evtchn_upcall_pending))
-                force_evtchn_callback();
-}
-
-static void xen_safe_halt(void)
-{
-        /* Blocking includes an implicit local_irq_enable(). */
-        if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
-                BUG();
-}
-
-static void xen_halt(void)
-{
-        if (irqs_disabled())
-                HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
-        else
-                xen_safe_halt();
-}
-
 static void xen_leave_lazy(void)
 {
         paravirt_leave_lazy(paravirt_get_lazy_mode());
@@ -1308,36 +1219,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
         },
 };
 
-static void __init __xen_init_IRQ(void)
-{
-#ifdef CONFIG_X86_64
-        int i;
-
-        /* Create identity vector->irq map */
-        for(i = 0; i < NR_VECTORS; i++) {
-                int cpu;
-
-                for_each_possible_cpu(cpu)
-                        per_cpu(vector_irq, cpu)[i] = i;
-        }
-#endif /* CONFIG_X86_64 */
-
-        xen_init_IRQ();
-}
-
-static const struct pv_irq_ops xen_irq_ops __initdata = {
-        .init_IRQ = __xen_init_IRQ,
-        .save_fl = xen_save_fl,
-        .restore_fl = xen_restore_fl,
-        .irq_disable = xen_irq_disable,
-        .irq_enable = xen_irq_enable,
-        .safe_halt = xen_safe_halt,
-        .halt = xen_halt,
-#ifdef CONFIG_X86_64
-        .adjust_exception_frame = xen_adjust_exception_frame,
-#endif
-};
-
 static const struct pv_apic_ops xen_apic_ops __initdata = {
 #ifdef CONFIG_X86_LOCAL_APIC
         .apic_write = xen_apic_write,
@@ -1740,10 +1621,11 @@ asmlinkage void __init xen_start_kernel(void)
         pv_init_ops = xen_init_ops;
         pv_time_ops = xen_time_ops;
         pv_cpu_ops = xen_cpu_ops;
-        pv_irq_ops = xen_irq_ops;
         pv_apic_ops = xen_apic_ops;
         pv_mmu_ops = xen_mmu_ops;
 
+        xen_init_irq_ops();
+
         if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
                 pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
                 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
......
#include <linux/hardirq.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
        (void)HYPERVISOR_xen_version(0, NULL);
}

static void __init __xen_init_IRQ(void)
{
#ifdef CONFIG_X86_64
        int i;

        /* Create identity vector->irq map */
        for(i = 0; i < NR_VECTORS; i++) {
                int cpu;

                for_each_possible_cpu(cpu)
                        per_cpu(vector_irq, cpu)[i] = i;
        }
#endif /* CONFIG_X86_64 */

        xen_init_IRQ();
}

static unsigned long xen_save_fl(void)
{
        struct vcpu_info *vcpu;
        unsigned long flags;

        vcpu = x86_read_percpu(xen_vcpu);

        /* flag has opposite sense of mask */
        flags = !vcpu->evtchn_upcall_mask;

        /* convert to IF type flag
           -0 -> 0x00000000
           -1 -> 0xffffffff
        */
        return (-flags) & X86_EFLAGS_IF;
}

static void xen_restore_fl(unsigned long flags)
{
        struct vcpu_info *vcpu;

        /* convert from IF type flag */
        flags = !(flags & X86_EFLAGS_IF);

        /* There's a one instruction preempt window here. We need to
           make sure we're don't switch CPUs between getting the vcpu
           pointer and updating the mask. */
        preempt_disable();
        vcpu = x86_read_percpu(xen_vcpu);
        vcpu->evtchn_upcall_mask = flags;
        preempt_enable_no_resched();

        /* Doesn't matter if we get preempted here, because any
           pending event will get dealt with anyway. */

        if (flags == 0) {
                preempt_check_resched();
                barrier(); /* unmask then check (avoid races) */
                if (unlikely(vcpu->evtchn_upcall_pending))
                        xen_force_evtchn_callback();
        }
}

static void xen_irq_disable(void)
{
        /* There's a one instruction preempt window here. We need to
           make sure we're don't switch CPUs between getting the vcpu
           pointer and updating the mask. */
        preempt_disable();
        x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
        preempt_enable_no_resched();
}

static void xen_irq_enable(void)
{
        struct vcpu_info *vcpu;

        /* We don't need to worry about being preempted here, since
           either a) interrupts are disabled, so no preemption, or b)
           the caller is confused and is trying to re-enable interrupts
           on an indeterminate processor. */

        vcpu = x86_read_percpu(xen_vcpu);
        vcpu->evtchn_upcall_mask = 0;

        /* Doesn't matter if we get preempted here, because any
           pending event will get dealt with anyway. */

        barrier(); /* unmask then check (avoid races) */
        if (unlikely(vcpu->evtchn_upcall_pending))
                xen_force_evtchn_callback();
}

static void xen_safe_halt(void)
{
        /* Blocking includes an implicit local_irq_enable(). */
        if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
                BUG();
}

static void xen_halt(void)
{
        if (irqs_disabled())
                HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        else
                xen_safe_halt();
}

static const struct pv_irq_ops xen_irq_ops __initdata = {
        .init_IRQ = __xen_init_IRQ,
        .save_fl = xen_save_fl,
        .restore_fl = xen_restore_fl,
        .irq_disable = xen_irq_disable,
        .irq_enable = xen_irq_enable,
        .safe_halt = xen_safe_halt,
        .halt = xen_halt,
#ifdef CONFIG_X86_64
        .adjust_exception_frame = xen_adjust_exception_frame,
#endif
};

void __init xen_init_irq_ops()
{
        pv_irq_ops = xen_irq_ops;
}
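
The (-flags) & X86_EFLAGS_IF trick above converts Xen's per-vcpu evtchn_upcall_mask (1 means event delivery is blocked) into the x86 EFLAGS convention, where a set IF bit means interrupts are enabled; xen_restore_fl() performs the inverse step. A minimal stand-alone sketch of just that conversion, with hypothetical helper names (mask_to_eflags/eflags_to_mask are not part of the patch):

/* Illustrative sketch only -- models the flag conversion used by
 * xen_save_fl()/xen_restore_fl(); not kernel code. */
#include <assert.h>
#include <stdio.h>

#define X86_EFLAGS_IF 0x00000200UL      /* Interrupt Flag bit in EFLAGS */

/* evtchn_upcall_mask has the opposite sense of IF: mask == 1 blocks events. */
static unsigned long mask_to_eflags(unsigned char evtchn_upcall_mask)
{
        unsigned long flags = !evtchn_upcall_mask;      /* 1 if events enabled */

        /* -0 -> 0x00000000, -1 -> 0xff..ff; keep only the IF bit */
        return (-flags) & X86_EFLAGS_IF;
}

static unsigned char eflags_to_mask(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);        /* IF set -> unmasked (0) */
}

int main(void)
{
        assert(mask_to_eflags(1) == 0);                 /* masked   -> IF clear */
        assert(mask_to_eflags(0) == X86_EFLAGS_IF);     /* unmasked -> IF set   */
        assert(eflags_to_mask(X86_EFLAGS_IF) == 0);
        assert(eflags_to_mask(0) == 1);
        printf("flag conversion round-trips as expected\n");
        return 0;
}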
@@ -298,7 +298,7 @@ check_events:
         push %eax
         push %ecx
         push %edx
-        call force_evtchn_callback
+        call xen_force_evtchn_callback
         pop %edx
         pop %ecx
         pop %eax
......
@@ -122,7 +122,7 @@ check_events:
         push %r9
         push %r10
         push %r11
-        call force_evtchn_callback
+        call xen_force_evtchn_callback
         pop %r11
         pop %r10
         pop %r9
......
@@ -31,6 +31,7 @@ void xen_vcpu_restore(void);
 
 void __init xen_build_dynamic_phys_to_machine(void);
 
+void xen_init_irq_ops(void);
 void xen_setup_timer(int cpu);
 void xen_setup_cpu_clockevents(void);
 unsigned long xen_tsc_khz(void);
......
@@ -84,17 +84,6 @@ static int irq_bindcount[NR_IRQS];
 /* Xen will never allocate port zero for any purpose. */
 #define VALID_EVTCHN(chn) ((chn) != 0)
 
-/*
- * Force a proper event-channel callback from Xen after clearing the
- * callback mask. We do this in a very simple manner, by making a call
- * down into Xen. The pending flag will be checked by Xen on return.
- */
-void force_evtchn_callback(void)
-{
-        (void)HYPERVISOR_xen_version(0, NULL);
-}
-EXPORT_SYMBOL_GPL(force_evtchn_callback);
-
 static struct irq_chip xen_dynamic_chip;
 
 /* Constructor for packed IRQ information. */
......
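
The barrier(); /* unmask then check */ sequence in xen_irq_enable() and xen_restore_fl() matters because an event can become pending while upcalls are masked; if the code only cleared the mask, that event would sit undelivered until the next trap into Xen, which is why the pending flag is re-checked and xen_force_evtchn_callback() is called. A toy single-threaded model of that ordering, with made-up names (vcpu_model, model_irq_enable), just to show the unmask-then-check shape:

/* Illustrative sketch only -- a user-space model of the unmask-then-check
 * pattern; the struct and function names are invented for this example. */
#include <stdio.h>

struct vcpu_model {
        volatile unsigned char evtchn_upcall_mask;      /* 1 = events blocked */
        volatile unsigned char evtchn_upcall_pending;   /* 1 = event waiting  */
};

static void deliver_pending(struct vcpu_model *v)
{
        v->evtchn_upcall_pending = 0;
        printf("pending event delivered\n");
}

static void model_irq_enable(struct vcpu_model *v)
{
        v->evtchn_upcall_mask = 0;              /* unmask first ...           */
        __asm__ __volatile__("" ::: "memory");  /* ... compiler barrier()     */
        if (v->evtchn_upcall_pending)           /* ... then re-check pending  */
                deliver_pending(v);             /* stand-in for the forced callback */
}

int main(void)
{
        struct vcpu_model v = { .evtchn_upcall_mask = 1, .evtchn_upcall_pending = 1 };

        model_irq_enable(&v);   /* an event that arrived while masked is not lost */
        return 0;
}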